diff --git a/.changelog/unreleased/SDK/1868-shared-utils-folder.md b/.changelog/unreleased/SDK/1868-shared-utils-folder.md new file mode 100644 index 00000000000..67af881b6ff --- /dev/null +++ b/.changelog/unreleased/SDK/1868-shared-utils-folder.md @@ -0,0 +1,21 @@ +- The shared-utils topic ([#1868](https://github.com/anoma/namada/pull/1868)) moves the following: + + _Modules_ + | From | To | + |-----------------------------------------|--------------------------------------| + | namada::ledger::tx | namada::sdk::tx | + | namada::ledger::rpc | namada::sdk::rpc | + | namada::ledger::signing | namada::sdk::signing | + | namada::ledger::masp | namada::sdk::masp | + | namada::ledger::args | namada::sdk::args | + | namada::ledger::wallet::alias | namada::sdk::wallet::alias | + | namada::ledger::wallet::derivation_path | namada::sdk::wallet::derivation_path | + | namada::ledger::wallet::keys | namada::sdk::wallet::keys | + | namada::ledger::wallet::pre_genesis | namada::sdk::wallet::pre_genesis | + | namada::ledger::wallet::store | namada::sdk::wallet::store | + | namada::types::error | namada::sdk::error | + + + _Types_ + + | From | To | + |---------------------------------|------------------------------| + | namada::ledger::queries::Client | namada::sdk::queries::Client | diff --git a/.changelog/unreleased/bug-fixes/1893-bridge-pool-roots-signing.md b/.changelog/unreleased/bug-fixes/1893-bridge-pool-roots-signing.md new file mode 100644 index 00000000000..70e8e3d662b --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1893-bridge-pool-roots-signing.md @@ -0,0 +1,2 @@ +- Never overwrite recent Bridge pool proofs in storage + ([\#1893](https://github.com/anoma/namada/pull/1893)) \ No newline at end of file diff --git a/.changelog/unreleased/features/1746-generic-io.md b/.changelog/unreleased/features/1746-generic-io.md new file mode 100644 index 00000000000..ff469ec8635 --- /dev/null +++ 
b/.changelog/unreleased/features/1746-generic-io.md @@ -0,0 +1,2 @@ +- Replaced standard IO in SDK and client code with a trait that allows custom + handling. ([\#1746](https://github.com/anoma/namada/pull/1746)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1865-eth-voting-power.md b/.changelog/unreleased/improvements/1865-eth-voting-power.md new file mode 100644 index 00000000000..d9fd126c210 --- /dev/null +++ b/.changelog/unreleased/improvements/1865-eth-voting-power.md @@ -0,0 +1,2 @@ +- Rework voting on Ethereum tallies across epoch boundaries + ([\#1865](https://github.com/anoma/namada/pull/1865)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1868-shared-utils-folder.md b/.changelog/unreleased/improvements/1868-shared-utils-folder.md new file mode 100644 index 00000000000..40d45529b4e --- /dev/null +++ b/.changelog/unreleased/improvements/1868-shared-utils-folder.md @@ -0,0 +1,2 @@ +- Move all functions considered to be a part of the SDK to the SDK + folder. ([#1868](https://github.com/anoma/namada/pull/1868)) diff --git a/.changelog/unreleased/improvements/1874-remove-redundant-struct.md b/.changelog/unreleased/improvements/1874-remove-redundant-struct.md new file mode 100644 index 00000000000..c1eb946e8b6 --- /dev/null +++ b/.changelog/unreleased/improvements/1874-remove-redundant-struct.md @@ -0,0 +1,2 @@ +- Removed redundant `WasmPayload` enum in favor of `Commitment`. 
+ ([\#1874](https://github.com/anoma/namada/pull/1874)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1876-updating-contributing.md b/.changelog/unreleased/improvements/1876-updating-contributing.md new file mode 100644 index 00000000000..871c666e413 --- /dev/null +++ b/.changelog/unreleased/improvements/1876-updating-contributing.md @@ -0,0 +1,2 @@ +- Added a section in CONTRIBUTING.md to outline how to document SDK + changes ([#1876](https://github.com/anoma/namada/pull/1876)) diff --git a/.changelog/unreleased/improvements/1877-refactor-get-fee-unshield.md b/.changelog/unreleased/improvements/1877-refactor-get-fee-unshield.md new file mode 100644 index 00000000000..8db436c7bdd --- /dev/null +++ b/.changelog/unreleased/improvements/1877-refactor-get-fee-unshield.md @@ -0,0 +1,2 @@ +- Refactored retrieval of `Transaction` object for fee unshielding. + ([\#1877](https://github.com/anoma/namada/pull/1877)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1884-multisig-fixes.md b/.changelog/unreleased/improvements/1884-multisig-fixes.md new file mode 100644 index 00000000000..3c380fb3f4b --- /dev/null +++ b/.changelog/unreleased/improvements/1884-multisig-fixes.md @@ -0,0 +1,2 @@ +- Enable hardware wallets to participate in nondegenerate multisignature + transactions. ([\#1884](https://github.com/anoma/namada/pull/1884)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1886-validators-by-hostnames.md b/.changelog/unreleased/improvements/1886-validators-by-hostnames.md new file mode 100644 index 00000000000..fc96da016eb --- /dev/null +++ b/.changelog/unreleased/improvements/1886-validators-by-hostnames.md @@ -0,0 +1,2 @@ +- Added support for validators' hostnames in configuration. 
+ ([\#1886](https://github.com/anoma/namada/pull/1886)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1892-bridge-pool-zero-fees.md b/.changelog/unreleased/improvements/1892-bridge-pool-zero-fees.md new file mode 100644 index 00000000000..83ff3dd0f82 --- /dev/null +++ b/.changelog/unreleased/improvements/1892-bridge-pool-zero-fees.md @@ -0,0 +1,2 @@ +- Allow Bridge pool transfers to pay zero gas fees + ([\#1892](https://github.com/anoma/namada/pull/1892)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1899-retransmit-expired-eth-events.md b/.changelog/unreleased/improvements/1899-retransmit-expired-eth-events.md new file mode 100644 index 00000000000..85c97580906 --- /dev/null +++ b/.changelog/unreleased/improvements/1899-retransmit-expired-eth-events.md @@ -0,0 +1,2 @@ +- Retransmit timed out Ethereum events in case they have accumulated >1/3 voting + power ([\#1899](https://github.com/anoma/namada/pull/1899)) \ No newline at end of file diff --git a/.changelog/unreleased/miscellaneous/1885-migrate-new-contracts.md b/.changelog/unreleased/miscellaneous/1885-migrate-new-contracts.md new file mode 100644 index 00000000000..42e75fb5a3a --- /dev/null +++ b/.changelog/unreleased/miscellaneous/1885-migrate-new-contracts.md @@ -0,0 +1,2 @@ +- Migrate to the new Ethereum contracts + ([\#1885](https://github.com/anoma/namada/pull/1885)) \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5798b2c71c7..233a9333501 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -24,6 +24,7 @@ The section should either be one of the following choices: - `features` - `improvements` - `testing` +- `SDK` To add a change log entry using `unclog`, you can fill in the following command (prefer to use the issue number, for which the `--pull-request` argument may be omitted): @@ -47,6 +48,13 @@ If none of the sections fit, new sections may 
be added. To find the existing sec for i in $(ls -d .changelog/*/*/); do basename "$i"; done | sort | uniq ``` +#### SDK Changelog + +The Namada SDK is exposed to any developer building upon Namada. Thus, any change made to a public facing function is a breaking change, and therefore should be documented in the Changelog under the `SDK` section. + +The message should outline the exact API change, along with a small section describing *how* and *why* the component was changed. This should give motivation and context to any developer building upon Namada on how they can update their code to the next version. + ## Development priorities + If you’d like to follow the development or contribute with new or unimplemented features, we recommend to check [the issues](https://github.com/anoma/namada/issues) that are in current focus of the ledger team. diff --git a/Cargo.lock b/Cargo.lock index 143043ddcbd..2e205d26fc9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2106,8 +2106,8 @@ dependencies = [ [[package]] name = "ethbridge-bridge-contract" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" +version = "0.24.0" +source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.24.0#d66708bb8a734111988b9eaf08c7473bd7020c00" dependencies = [ "ethbridge-bridge-events", "ethbridge-structs", @@ -2117,8 +2117,8 @@ dependencies = [ [[package]] name = "ethbridge-bridge-events" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" +version = "0.24.0" +source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.24.0#d66708bb8a734111988b9eaf08c7473bd7020c00" dependencies = [ "ethabi", "ethbridge-structs", @@ -2128,40 +2128,17 @@ dependencies = [ [[package]] name = "ethbridge-events" -version = "0.23.0" -source = 
"git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" +version = "0.24.0" +source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.24.0#d66708bb8a734111988b9eaf08c7473bd7020c00" dependencies = [ "ethbridge-bridge-events", - "ethbridge-governance-events", "ethers", ] -[[package]] -name = "ethbridge-governance-contract" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" -dependencies = [ - "ethbridge-governance-events", - "ethbridge-structs", - "ethers", - "ethers-contract", -] - -[[package]] -name = "ethbridge-governance-events" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" -dependencies = [ - "ethabi", - "ethbridge-structs", - "ethers", - "ethers-contract", -] - [[package]] name = "ethbridge-structs" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" +version = "0.24.0" +source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.24.0#d66708bb8a734111988b9eaf08c7473bd7020c00" dependencies = [ "ethabi", "ethers", @@ -3485,7 +3462,7 @@ checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", - "rustix 0.37.13", + "rustix 0.37.1", "windows-sys 0.48.0", ] @@ -4088,7 +4065,6 @@ dependencies = [ "derivation-path", "derivative", "ethbridge-bridge-contract", - "ethbridge-governance-contract", "ethers", "eyre", "futures", @@ -4167,7 +4143,6 @@ dependencies = [ "ethabi", "ethbridge-bridge-events", "ethbridge-events", - "ethbridge-governance-events", "eyre", "fd-lock", "ferveo", @@ -5851,16 +5826,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.13" +version = "0.37.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f79bef90eb6d984c72722595b5b1348ab39275a5e5123faca6863bf07d75a4e0" +checksum = "d4790277f605573dd24b6751701e0823582a63c7cafc095e427e6c66e45dd75e" dependencies = [ "bitflags 1.2.1", "errno", "io-lifetimes", "libc", "linux-raw-sys 0.3.7", - "windows-sys 0.48.0", + "windows-sys 0.45.0", ] [[package]] @@ -6623,7 +6598,7 @@ dependencies = [ "cfg-if 1.0.0", "fastrand", "redox_syscall 0.3.5", - "rustix 0.37.13", + "rustix 0.37.1", "windows-sys 0.45.0", ] diff --git a/Cargo.toml b/Cargo.toml index 48141a0f775..e1f3124fbf1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,12 +67,10 @@ directories = "4.0.1" ed25519-consensus = "1.2.0" escargot = "0.5.7" ethabi = "18.0.0" -ethbridge-bridge-contract = {git = "https://github.com/heliaxdev/ethbridge-rs", tag = "v0.23.0"} -ethbridge-bridge-events = {git = "https://github.com/heliaxdev/ethbridge-rs", tag = "v0.23.0"} -ethbridge-events = {git = "https://github.com/heliaxdev/ethbridge-rs", tag = "v0.23.0"} -ethbridge-governance-contract = {git = "https://github.com/heliaxdev/ethbridge-rs", tag = "v0.23.0"} -ethbridge-governance-events = {git = "https://github.com/heliaxdev/ethbridge-rs", tag = "v0.23.0"} -ethbridge-structs = { git = "https://github.com/heliaxdev/ethbridge-rs", tag = "v0.23.0" } +ethbridge-bridge-contract = {git = "https://github.com/heliaxdev/ethbridge-rs", tag = "v0.24.0"} +ethbridge-bridge-events = {git = "https://github.com/heliaxdev/ethbridge-rs", tag = "v0.24.0"} +ethbridge-events = {git = "https://github.com/heliaxdev/ethbridge-rs", tag = "v0.24.0"} +ethbridge-structs = { git = "https://github.com/heliaxdev/ethbridge-rs", tag = "v0.24.0" } ethers = "2.0.0" expectrl = "0.7.0" eyre = "0.6.5" diff --git a/apps/Cargo.toml b/apps/Cargo.toml index 
16fe028be33..1d33f55df75 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -89,7 +89,6 @@ ed25519-consensus.workspace = true ethabi.workspace = true ethbridge-bridge-events.workspace = true ethbridge-events.workspace = true -ethbridge-governance-events.workspace = true eyre.workspace = true fd-lock.workspace = true ferveo-common.workspace = true diff --git a/apps/src/bin/namada-client/cli.rs b/apps/src/bin/namada-client/cli.rs index 283ac65c692..e69de29bb2d 100644 --- a/apps/src/bin/namada-client/cli.rs +++ b/apps/src/bin/namada-client/cli.rs @@ -1,544 +0,0 @@ -//! Namada client CLI. - -use color_eyre::eyre::{eyre, Report, Result}; -use namada::ledger::eth_bridge::bridge_pool; -use namada::ledger::rpc::wait_until_node_is_synched; -use namada::ledger::tx::dump_tx; -use namada::ledger::{signing, tx as sdk_tx}; -use namada::types::control_flow::ProceedOrElse; -use namada_apps::cli; -use namada_apps::cli::args::CliToSdk; -use namada_apps::cli::cmds::*; -use namada_apps::client::{rpc, tx, utils}; -use namada_apps::facade::tendermint_rpc::HttpClient; - -fn error() -> Report { - eyre!("Fatal error") -} - -pub async fn main() -> Result<()> { - match cli::namada_client_cli()? { - cli::NamadaClient::WithContext(cmd_box) => { - let (cmd, mut ctx) = *cmd_box; - use NamadaClientWithContext as Sub; - match cmd { - // Ledger cmds - Sub::TxCustom(TxCustom(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - let dry_run = args.tx.dry_run; - tx::submit_custom::(&client, &mut ctx, args) - .await?; - if !dry_run { - namada_apps::wallet::save(&ctx.wallet) - .unwrap_or_else(|err| eprintln!("{}", err)); - } else { - println!( - "Transaction dry run. No addresses have been \ - saved." 
- ) - } - } - Sub::TxTransfer(TxTransfer(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - tx::submit_transfer(&client, ctx, args).await?; - } - Sub::TxIbcTransfer(TxIbcTransfer(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - tx::submit_ibc_transfer::(&client, ctx, args) - .await?; - } - Sub::TxUpdateAccount(TxUpdateAccount(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - tx::submit_update_account::( - &client, &mut ctx, args, - ) - .await?; - } - Sub::TxInitAccount(TxInitAccount(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - let dry_run = args.tx.dry_run; - tx::submit_init_account::( - &client, &mut ctx, args, - ) - .await?; - if !dry_run { - namada_apps::wallet::save(&ctx.wallet) - .unwrap_or_else(|err| eprintln!("{}", err)); - } else { - println!( - "Transaction dry run. No addresses have been \ - saved." 
- ) - } - } - Sub::TxInitValidator(TxInitValidator(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - tx::submit_init_validator::(&client, ctx, args) - .await?; - } - Sub::TxInitProposal(TxInitProposal(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - tx::submit_init_proposal::(&client, ctx, args) - .await?; - } - Sub::TxVoteProposal(TxVoteProposal(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - tx::submit_vote_proposal::(&client, ctx, args) - .await?; - } - Sub::TxRevealPk(TxRevealPk(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - tx::submit_reveal_pk::(&client, &mut ctx, args) - .await?; - } - Sub::Bond(Bond(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - tx::submit_bond::(&client, &mut ctx, args) - .await?; - } - Sub::Unbond(Unbond(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - tx::submit_unbond::(&client, &mut ctx, args) - .await?; - } - 
Sub::Withdraw(Withdraw(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - tx::submit_withdraw::(&client, ctx, args) - .await?; - } - Sub::TxCommissionRateChange(TxCommissionRateChange( - mut args, - )) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client).await; - let args = args.to_sdk(&mut ctx); - tx::submit_validator_commission_change::( - &client, ctx, args, - ) - .await?; - } - // Eth bridge - Sub::AddToEthBridgePool(args) => { - let mut args = args.0; - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - let tx_args = args.tx.clone(); - - let default_signer = Some(args.sender.clone()); - let signing_data = signing::aux_signing_data( - &client, - &mut ctx.wallet, - &args.tx, - &args.sender, - default_signer, - ) - .await?; - - tx::submit_reveal_aux( - &client, - &mut ctx, - tx_args.clone(), - &args.sender, - ) - .await?; - - let tx = bridge_pool::build_bridge_pool_tx( - &client, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - - signing::generate_test_vector( - &client, - &mut ctx.wallet, - &tx, - ) - .await; - - if args.tx.dump_tx { - dump_tx(&args.tx, tx); - } else { - signing::sign_tx( - &mut ctx.wallet, - &tx_args, - &mut tx, - signing_data, - )?; - - sdk_tx::process_tx( - &client, - &mut ctx.wallet, - &tx_args, - tx, - ) - .await?; - } - } - Sub::TxUnjailValidator(TxUnjailValidator(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.tx.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = 
args.to_sdk(&mut ctx); - tx::submit_unjail_validator::( - &client, ctx, args, - ) - .await?; - } - // Ledger queries - Sub::QueryEpoch(QueryEpoch(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - rpc::query_and_print_epoch(&client).await; - } - Sub::QueryTransfers(QueryTransfers(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_transfers( - &client, - &mut ctx.wallet, - &mut ctx.shielded, - args, - ) - .await; - } - Sub::QueryConversions(QueryConversions(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_conversions(&client, &mut ctx.wallet, args) - .await; - } - Sub::QueryBlock(QueryBlock(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - rpc::query_block(&client).await; - } - Sub::QueryBalance(QueryBalance(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_balance( - &client, - &mut ctx.wallet, - &mut ctx.shielded, - args, - ) - .await; - } - Sub::QueryBonds(QueryBonds(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = 
args.to_sdk(&mut ctx); - rpc::query_bonds(&client, &mut ctx.wallet, args) - .await - .expect("expected successful query of bonds"); - } - Sub::QueryBondedStake(QueryBondedStake(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_bonded_stake(&client, args).await; - } - Sub::QueryValidatorState(QueryValidatorState(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_and_print_validator_state( - &client, - &mut ctx.wallet, - args, - ) - .await; - } - Sub::QueryCommissionRate(QueryCommissionRate(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_and_print_commission_rate( - &client, - &mut ctx.wallet, - args, - ) - .await; - } - Sub::QuerySlashes(QuerySlashes(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_slashes(&client, &mut ctx.wallet, args).await; - } - Sub::QueryDelegations(QueryDelegations(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_delegations(&client, &mut ctx.wallet, args) - .await; - } - Sub::QueryFindValidator(QueryFindValidator(mut args)) => { - let client = 
HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_find_validator(&client, args).await; - } - Sub::QueryResult(QueryResult(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_result(&client, args).await; - } - Sub::QueryRawBytes(QueryRawBytes(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_raw_bytes(&client, args).await; - } - - Sub::QueryProposal(QueryProposal(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_proposal(&client, args).await; - } - Sub::QueryProposalResult(QueryProposalResult(mut args)) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_proposal_result(&client, args).await; - } - Sub::QueryProtocolParameters(QueryProtocolParameters( - mut args, - )) => { - let client = HttpClient::new(utils::take_config_address( - &mut args.query.ledger_address, - )) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_protocol_parameters(&client, args).await; - } - Sub::QueryAccount(QueryAccount(args)) => { - let client = - 
HttpClient::new(args.query.ledger_address.clone()) - .unwrap(); - wait_until_node_is_synched(&client) - .await - .proceed_or_else(error)?; - let args = args.to_sdk(&mut ctx); - rpc::query_account(&client, args).await; - } - } - } - cli::NamadaClient::WithoutContext(cmd, global_args) => match cmd { - // Utils cmds - Utils::JoinNetwork(JoinNetwork(args)) => { - utils::join_network(global_args, args).await - } - Utils::FetchWasms(FetchWasms(args)) => { - utils::fetch_wasms(global_args, args).await - } - Utils::InitNetwork(InitNetwork(args)) => { - utils::init_network(global_args, args) - } - Utils::InitGenesisValidator(InitGenesisValidator(args)) => { - utils::init_genesis_validator(global_args, args) - } - Utils::PkToTmAddress(PkToTmAddress(args)) => { - utils::pk_to_tm_address(global_args, args) - } - Utils::DefaultBaseDir(DefaultBaseDir(args)) => { - utils::default_base_dir(global_args, args) - } - Utils::EpochSleep(EpochSleep(args)) => { - let mut ctx = cli::Context::new(global_args) - .expect("expected to construct a context"); - let ledger_address = args.ledger_address.clone(); - wait_until_node_is_synched(&ledger_address).await; - let client = HttpClient::new(ledger_address).unwrap(); - let args = args.to_sdk(&mut ctx); - rpc::epoch_sleep(&client, args).await; - } - }, - } - Ok(()) -} diff --git a/apps/src/bin/namada-client/main.rs b/apps/src/bin/namada-client/main.rs index a9e1fb4948c..9b43ca8f91b 100644 --- a/apps/src/bin/namada-client/main.rs +++ b/apps/src/bin/namada-client/main.rs @@ -1,5 +1,5 @@ use color_eyre::eyre::Result; -use namada_apps::cli::api::CliApi; +use namada_apps::cli::api::{CliApi, CliIo}; use namada_apps::facade::tendermint_rpc::HttpClient; use namada_apps::{cli, logging}; use tracing_subscriber::filter::LevelFilter; @@ -13,7 +13,7 @@ async fn main() -> Result<()> { let _log_guard = logging::init_from_env_or(LevelFilter::INFO)?; // run the CLI - CliApi::<()>::handle_client_command::( + CliApi::::handle_client_command::( None, 
cli::namada_client_cli()?, ) diff --git a/apps/src/bin/namada-relayer/main.rs b/apps/src/bin/namada-relayer/main.rs index 52c15192dc8..05d2620bcb0 100644 --- a/apps/src/bin/namada-relayer/main.rs +++ b/apps/src/bin/namada-relayer/main.rs @@ -1,6 +1,6 @@ use color_eyre::eyre::Result; use namada::tendermint_rpc::HttpClient; -use namada_apps::cli::api::CliApi; +use namada_apps::cli::api::{CliApi, CliIo}; use namada_apps::{cli, logging}; use tracing_subscriber::filter::LevelFilter; @@ -14,5 +14,5 @@ async fn main() -> Result<()> { let cmd = cli::namada_relayer_cli()?; // run the CLI - CliApi::<()>::handle_relayer_command::(None, cmd).await + CliApi::::handle_relayer_command::(None, cmd).await } diff --git a/apps/src/bin/namada-wallet/main.rs b/apps/src/bin/namada-wallet/main.rs index 7459234c791..5e94831716c 100644 --- a/apps/src/bin/namada-wallet/main.rs +++ b/apps/src/bin/namada-wallet/main.rs @@ -1,10 +1,10 @@ use color_eyre::eyre::Result; use namada_apps::cli; -use namada_apps::cli::api::CliApi; +use namada_apps::cli::api::{CliApi, CliIo}; pub fn main() -> Result<()> { color_eyre::install()?; let (cmd, ctx) = cli::namada_wallet_cli()?; // run the CLI - CliApi::<()>::handle_wallet_command(cmd, ctx) + CliApi::::handle_wallet_command(cmd, ctx) } diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 7ef136bc98c..657379b1e47 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -15,10 +15,12 @@ pub mod wallet; use clap::{ArgGroup, ArgMatches, ColorChoice}; use color_eyre::eyre::Result; +use namada::types::io::DefaultIo; use utils::*; -pub use utils::{dispatch_prompt, safe_exit, Cmd, TESTIN}; +pub use utils::{safe_exit, Cmd}; pub use self::context::Context; +use crate::cli::api::CliIo; include!("../../version.rs"); @@ -2290,10 +2292,12 @@ pub mod cmds { /// Used as sub-commands (`SubCmd` instance) in `namadar` binary. 
#[derive(Clone, Debug)] pub enum ValidatorSet { - /// Query an Ethereum ABI encoding of the consensus validator - /// set in Namada, at the given epoch, or the latest - /// one, if none is provided. - ConsensusValidatorSet(ConsensusValidatorSet), + /// Query the Bridge validator set in Namada, at the given epoch, + /// or the latest one, if none is provided. + BridgeValidatorSet(BridgeValidatorSet), + /// Query the Governance validator set in Namada, at the given epoch, + /// or the latest one, if none is provided. + GovernanceValidatorSet(GovernanceValidatorSet), /// Query an Ethereum ABI encoding of a proof of the consensus /// validator set in Namada, at the given epoch, or the next /// one, if none is provided. @@ -2308,14 +2312,19 @@ pub mod cmds { fn parse(matches: &ArgMatches) -> Option { matches.subcommand_matches(Self::CMD).and_then(|matches| { - let consensus_validator_set = - ConsensusValidatorSet::parse(matches) - .map(Self::ConsensusValidatorSet); + let bridge_validator_set = BridgeValidatorSet::parse(matches) + .map(Self::BridgeValidatorSet); + let governance_validator_set = + GovernanceValidatorSet::parse(matches) + .map(Self::GovernanceValidatorSet); let validator_set_proof = ValidatorSetProof::parse(matches) .map(Self::ValidatorSetProof); let relay = ValidatorSetUpdateRelay::parse(matches) .map(Self::ValidatorSetUpdateRelay); - consensus_validator_set.or(validator_set_proof).or(relay) + bridge_validator_set + .or(governance_validator_set) + .or(validator_set_proof) + .or(relay) }) } @@ -2327,34 +2336,56 @@ pub mod cmds { contracts.", ) .subcommand_required(true) - .subcommand(ConsensusValidatorSet::def().display_order(1)) + .subcommand(BridgeValidatorSet::def().display_order(1)) + .subcommand(GovernanceValidatorSet::def().display_order(1)) .subcommand(ValidatorSetProof::def().display_order(1)) .subcommand(ValidatorSetUpdateRelay::def().display_order(1)) } } #[derive(Clone, Debug)] - pub struct ConsensusValidatorSet( - pub 
args::ConsensusValidatorSet, + pub struct BridgeValidatorSet(pub args::BridgeValidatorSet); + + impl SubCmd for BridgeValidatorSet { + const CMD: &'static str = "bridge"; + + fn parse(matches: &ArgMatches) -> Option { + matches + .subcommand_matches(Self::CMD) + .map(|matches| Self(args::BridgeValidatorSet::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about( + "Query the Bridge validator set in Namada, at the given \ + epoch, or the latest one, if none is provided.", + ) + .add_args::>() + } + } + + #[derive(Clone, Debug)] + pub struct GovernanceValidatorSet( + pub args::GovernanceValidatorSet, ); - impl SubCmd for ConsensusValidatorSet { - const CMD: &'static str = "consensus"; + impl SubCmd for GovernanceValidatorSet { + const CMD: &'static str = "governance"; fn parse(matches: &ArgMatches) -> Option { matches.subcommand_matches(Self::CMD).map(|matches| { - Self(args::ConsensusValidatorSet::parse(matches)) + Self(args::GovernanceValidatorSet::parse(matches)) }) } fn def() -> App { App::new(Self::CMD) .about( - "Query an Ethereum ABI encoding of the consensus \ - validator set in Namada, at the requested epoch, or the \ - current one, if no epoch is provided.", + "Query the Governance validator set in Namada, at the \ + given epoch, or the latest one, if none is provided.", ) - .add_args::>() + .add_args::>() } } @@ -2455,12 +2486,11 @@ pub mod args { use std::collections::HashMap; use std::convert::TryFrom; use std::env; - use std::net::SocketAddr; use std::path::PathBuf; use std::str::FromStr; use namada::ibc::core::ics24_host::identifier::{ChannelId, PortId}; - pub use namada::ledger::args::*; + pub use namada::sdk::args::*; use namada::types::address::Address; use namada::types::chain::{ChainId, ChainIdPrefix}; use namada::types::dec::Dec; @@ -2585,6 +2615,14 @@ pub mod args { arg_default("gas-limit", DefaultFn(|| GasLimit::from(20_000))); pub const FEE_TOKEN: ArgDefaultFromCtx = arg_default_from_ctx("gas-token", DefaultFn(|| 
"NAM".parse().unwrap())); + pub const FEE_PAYER: Arg = arg("fee-payer"); + pub const FEE_AMOUNT: ArgDefault = arg_default( + "fee-amount", + DefaultFn(|| token::DenominatedAmount { + amount: token::Amount::default(), + denom: NATIVE_MAX_DECIMAL_PLACES.into(), + }), + ); pub const GENESIS_PATH: Arg = arg("genesis-path"); pub const GENESIS_VALIDATOR: ArgOpt = arg("genesis-validator").opt(); @@ -2611,7 +2649,7 @@ pub mod args { arg("max-commission-rate-change"); pub const MAX_ETH_GAS: ArgOpt = arg_opt("max_eth-gas"); pub const MODE: ArgOpt = arg_opt("mode"); - pub const NET_ADDRESS: Arg = arg("net-address"); + pub const NET_ADDRESS: Arg = arg("net-address"); pub const NAMADA_START_TIME: ArgOpt = arg_opt("time"); pub const NO_CONVERSIONS: ArgFlag = flag("no-conversions"); pub const NUT: ArgFlag = flag("nut"); @@ -3184,18 +3222,18 @@ pub mod args { } } - impl CliToSdkCtxless> - for ConsensusValidatorSet + impl CliToSdkCtxless> + for BridgeValidatorSet { - fn to_sdk_ctxless(self) -> ConsensusValidatorSet { - ConsensusValidatorSet:: { + fn to_sdk_ctxless(self) -> BridgeValidatorSet { + BridgeValidatorSet:: { query: self.query.to_sdk_ctxless(), epoch: self.epoch, } } } - impl Args for ConsensusValidatorSet { + impl Args for BridgeValidatorSet { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let epoch = EPOCH.parse(matches); @@ -3205,12 +3243,37 @@ pub mod args { fn def(app: App) -> App { app.add_args::>().arg( EPOCH.def().help( - "The epoch of the consensus set of validators to query.", + "The epoch of the Bridge set of validators to query.", ), ) } } + impl CliToSdkCtxless> + for GovernanceValidatorSet + { + fn to_sdk_ctxless(self) -> GovernanceValidatorSet { + GovernanceValidatorSet:: { + query: self.query.to_sdk_ctxless(), + epoch: self.epoch, + } + } + } + + impl Args for GovernanceValidatorSet { + fn parse(matches: &ArgMatches) -> Self { + let query = Query::parse(matches); + let epoch = EPOCH.parse(matches); + Self { query, epoch } + } + 
+ fn def(app: App) -> App { + app.add_args::>().arg(EPOCH.def().help( + "The epoch of the Governance set of validators to query.", + )) + } + } + impl CliToSdkCtxless> for ValidatorSetProof { @@ -4233,14 +4296,6 @@ pub mod args { } } - impl CliToSdk> for QueryPgf { - fn to_sdk(self, ctx: &mut Context) -> QueryPgf { - QueryPgf:: { - query: self.query.to_sdk(ctx), - } - } - } - impl Args for QueryPgf { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); @@ -4253,6 +4308,14 @@ pub mod args { } } + impl CliToSdk> for QueryPgf { + fn to_sdk(self, ctx: &mut Context) -> QueryPgf { + QueryPgf:: { + query: self.query.to_sdk(ctx), + } + } + } + impl CliToSdk> for Withdraw { fn to_sdk(self, ctx: &mut Context) -> Withdraw { Withdraw:: { @@ -5567,7 +5630,7 @@ pub mod args { pub alias: String, pub commission_rate: Dec, pub max_commission_rate_change: Dec, - pub net_address: SocketAddr, + pub net_address: String, pub unsafe_dont_encrypt: bool, pub key_scheme: SchemeType, } @@ -5653,7 +5716,7 @@ pub fn namada_client_cli() -> Result { let global_args = args::Global::parse(&matches); match cmd { cmds::NamadaClient::WithContext(sub_cmd) => { - let context = Context::new(global_args)?; + let context = Context::new::(global_args)?; Ok(NamadaClient::WithContext(Box::new((sub_cmd, context)))) } cmds::NamadaClient::WithoutContext(sub_cmd) => { @@ -5689,7 +5752,7 @@ pub fn namada_relayer_cli() -> Result { cmds::EthBridgePool::WithContext(sub_cmd), ) => { let global_args = args::Global::parse(&matches); - let context = Context::new(global_args)?; + let context = Context::new::(global_args)?; Ok(NamadaRelayer::EthBridgePoolWithCtx(Box::new(( sub_cmd, context, )))) diff --git a/apps/src/lib/cli/api.rs b/apps/src/lib/cli/api.rs index c22fe39fd3d..bb387c5d9a5 100644 --- a/apps/src/lib/cli/api.rs +++ b/apps/src/lib/cli/api.rs @@ -1,9 +1,10 @@ use std::marker::PhantomData; -use namada::ledger::queries::Client; -use namada::ledger::rpc::wait_until_node_is_synched; +use 
namada::sdk::queries::Client; +use namada::sdk::rpc::wait_until_node_is_synched; use namada::tendermint_rpc::HttpClient; use namada::types::control_flow::Halt; +use namada::types::io::Io; use tendermint_config::net::Address as TendermintAddress; use crate::client::utils; @@ -12,7 +13,7 @@ use crate::client::utils; #[async_trait::async_trait(?Send)] pub trait CliClient: Client + Sync { fn from_tendermint_address(address: &mut TendermintAddress) -> Self; - async fn wait_until_node_is_synced(&self) -> Halt<()>; + async fn wait_until_node_is_synced(&self) -> Halt<()>; } #[async_trait::async_trait(?Send)] @@ -21,9 +22,14 @@ impl CliClient for HttpClient { HttpClient::new(utils::take_config_address(address)).unwrap() } - async fn wait_until_node_is_synced(&self) -> Halt<()> { - wait_until_node_is_synched(self).await + async fn wait_until_node_is_synced(&self) -> Halt<()> { + wait_until_node_is_synched::<_, IO>(self).await } } -pub struct CliApi(PhantomData); +pub struct CliIo; + +#[async_trait::async_trait(?Send)] +impl Io for CliIo {} + +pub struct CliApi(PhantomData); diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index 675c5b4aea4..e136430abc7 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -1,8 +1,9 @@ use color_eyre::eyre::{eyre, Report, Result}; use namada::ledger::eth_bridge::bridge_pool; -use namada::ledger::tx::dump_tx; -use namada::ledger::{signing, tx as sdk_tx}; +use namada::sdk::tx::dump_tx; +use namada::sdk::{signing, tx as sdk_tx}; use namada::types::control_flow::ProceedOrElse; +use namada::types::io::Io; use crate::cli; use crate::cli::api::{CliApi, CliClient}; @@ -14,7 +15,7 @@ fn error() -> Report { eyre!("Fatal error") } -impl CliApi { +impl CliApi { pub async fn handle_client_command( client: Option, cmd: cli::NamadaClient, @@ -35,20 +36,21 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); 
let dry_run = args.tx.dry_run || args.tx.dry_run_wrapper; - tx::submit_custom(&client, &mut ctx, args).await?; + tx::submit_custom::<_, IO>(&client, &mut ctx, args) + .await?; if !dry_run { crate::wallet::save(&ctx.wallet) .unwrap_or_else(|err| eprintln!("{}", err)); } else { - println!( + IO::println( "Transaction dry run. No addresses have been \ - saved." + saved.", ) } } @@ -59,11 +61,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_transfer(&client, ctx, args).await?; + tx::submit_transfer::<_, IO>(&client, ctx, args) + .await?; } Sub::TxIbcTransfer(TxIbcTransfer(mut args)) => { let client = client.unwrap_or_else(|| { @@ -72,11 +75,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_ibc_transfer(&client, ctx, args).await?; + tx::submit_ibc_transfer::<_, IO>(&client, ctx, args) + .await?; } Sub::TxUpdateAccount(TxUpdateAccount(mut args)) => { let client = client.unwrap_or_else(|| { @@ -85,12 +89,14 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_update_account(&client, &mut ctx, args) - .await?; + tx::submit_update_account::<_, IO>( + &client, &mut ctx, args, + ) + .await?; } Sub::TxInitAccount(TxInitAccount(mut args)) => { let client = client.unwrap_or_else(|| { @@ -99,21 +105,23 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); let dry_run = args.tx.dry_run || args.tx.dry_run_wrapper; - tx::submit_init_account(&client, &mut ctx, args) - .await?; + tx::submit_init_account::<_, IO>( + &client, &mut ctx, args, + ) + .await?; if !dry_run { crate::wallet::save(&ctx.wallet) 
.unwrap_or_else(|err| eprintln!("{}", err)); } else { - println!( + IO::println( "Transaction dry run. No addresses have been \ - saved." + saved.", ) } } @@ -124,11 +132,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_init_validator(&client, ctx, args).await?; + tx::submit_init_validator::<_, IO>(&client, ctx, args) + .await?; } Sub::TxInitProposal(TxInitProposal(mut args)) => { let client = client.unwrap_or_else(|| { @@ -137,11 +146,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_init_proposal(&client, ctx, args).await?; + tx::submit_init_proposal::<_, IO>(&client, ctx, args) + .await?; } Sub::TxVoteProposal(TxVoteProposal(mut args)) => { let client = client.unwrap_or_else(|| { @@ -150,11 +160,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_vote_proposal(&client, ctx, args).await?; + tx::submit_vote_proposal::<_, IO>(&client, ctx, args) + .await?; } Sub::TxRevealPk(TxRevealPk(mut args)) => { let client = client.unwrap_or_else(|| { @@ -163,11 +174,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_reveal_pk(&client, &mut ctx, args).await?; + tx::submit_reveal_pk::<_, IO>(&client, &mut ctx, args) + .await?; } Sub::Bond(Bond(mut args)) => { let client = client.unwrap_or_else(|| { @@ -176,11 +188,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_bond(&client, &mut ctx, args).await?; + tx::submit_bond::<_, IO>(&client, &mut ctx, args) + 
.await?; } Sub::Unbond(Unbond(mut args)) => { let client = client.unwrap_or_else(|| { @@ -189,11 +202,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_unbond(&client, &mut ctx, args).await?; + tx::submit_unbond::<_, IO>(&client, &mut ctx, args) + .await?; } Sub::Withdraw(Withdraw(mut args)) => { let client = client.unwrap_or_else(|| { @@ -202,11 +216,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_withdraw(&client, ctx, args).await?; + tx::submit_withdraw::<_, IO>(&client, ctx, args) + .await?; } Sub::TxCommissionRateChange(TxCommissionRateChange( mut args, @@ -217,11 +232,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_validator_commission_change( + tx::submit_validator_commission_change::<_, IO>( &client, ctx, args, ) .await?; @@ -235,24 +250,24 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); let tx_args = args.tx.clone(); let default_signer = Some(args.sender.clone()); - let signing_data = tx::aux_signing_data( + let signing_data = tx::aux_signing_data::<_, IO>( &client, &mut ctx.wallet, &args.tx, - &Some(args.sender.clone()), + Some(args.sender.clone()), default_signer, ) .await?; let (mut tx, _epoch) = - bridge_pool::build_bridge_pool_tx( + bridge_pool::build_bridge_pool_tx::<_, _, _, IO>( &client, &mut ctx.wallet, &mut ctx.shielded, @@ -261,7 +276,7 @@ impl CliApi { ) .await?; - signing::generate_test_vector( + signing::generate_test_vector::<_, _, IO>( &client, &mut ctx.wallet, &tx, @@ -269,9 +284,9 @@ impl CliApi { .await?; if args.tx.dump_tx { - dump_tx(&args.tx, 
tx); + dump_tx::(&args.tx, tx); } else { - tx::submit_reveal_aux( + tx::submit_reveal_aux::<_, IO>( &client, &mut ctx, tx_args.clone(), @@ -286,7 +301,7 @@ impl CliApi { signing_data, )?; - sdk_tx::process_tx( + sdk_tx::process_tx::<_, _, IO>( &client, &mut ctx.wallet, &tx_args, @@ -302,11 +317,14 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_unjail_validator(&client, ctx, args).await?; + tx::submit_unjail_validator::<_, IO>( + &client, ctx, args, + ) + .await?; } Sub::TxUpdateStewardCommission( TxUpdateStewardCommission(mut args), @@ -317,11 +335,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_update_steward_commission( + tx::submit_update_steward_commission::<_, IO>( &client, ctx, args, ) .await?; @@ -333,11 +351,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::submit_resign_steward(&client, ctx, args).await?; + tx::submit_resign_steward::<_, IO>(&client, ctx, args) + .await?; } // Ledger queries Sub::QueryEpoch(QueryEpoch(mut args)) => { @@ -345,10 +364,10 @@ impl CliApi { C::from_tendermint_address(&mut args.ledger_address) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; - rpc::query_and_print_epoch(&client).await; + rpc::query_and_print_epoch::<_, IO>(&client).await; } Sub::QueryValidatorState(QueryValidatorState(mut args)) => { let client = client.unwrap_or_else(|| { @@ -357,11 +376,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_and_print_validator_state( + rpc::query_and_print_validator_state::<_, 
IO>( &client, &mut ctx.wallet, args, @@ -375,11 +394,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_transfers( + rpc::query_transfers::<_, _, IO>( &client, &mut ctx.wallet, &mut ctx.shielded, @@ -394,22 +413,26 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_conversions(&client, &mut ctx.wallet, args) - .await; + rpc::query_conversions::<_, IO>( + &client, + &mut ctx.wallet, + args, + ) + .await; } Sub::QueryBlock(QueryBlock(mut args)) => { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut args.ledger_address) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; - rpc::query_block(&client).await; + rpc::query_block::<_, IO>(&client).await; } Sub::QueryBalance(QueryBalance(mut args)) => { let client = client.unwrap_or_else(|| { @@ -418,11 +441,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_balance( + rpc::query_balance::<_, _, IO>( &client, &mut ctx.wallet, &mut ctx.shielded, @@ -437,13 +460,17 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_bonds(&client, &mut ctx.wallet, args) - .await - .expect("expected successful query of bonds"); + rpc::query_bonds::<_, IO>( + &client, + &mut ctx.wallet, + args, + ) + .await + .expect("expected successful query of bonds"); } Sub::QueryBondedStake(QueryBondedStake(mut args)) => { let client = client.unwrap_or_else(|| { @@ -452,11 +479,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await 
.proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_bonded_stake(&client, args).await; + rpc::query_bonded_stake::<_, IO>(&client, args).await; } Sub::QueryCommissionRate(QueryCommissionRate(mut args)) => { let client = client.unwrap_or_else(|| { @@ -465,11 +492,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_and_print_commission_rate( + rpc::query_and_print_commission_rate::<_, IO>( &client, &mut ctx.wallet, args, @@ -483,12 +510,16 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_slashes(&client, &mut ctx.wallet, args) - .await; + rpc::query_slashes::<_, IO>( + &client, + &mut ctx.wallet, + args, + ) + .await; } Sub::QueryDelegations(QueryDelegations(mut args)) => { let client = client.unwrap_or_else(|| { @@ -497,12 +528,16 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_delegations(&client, &mut ctx.wallet, args) - .await; + rpc::query_delegations::<_, IO>( + &client, + &mut ctx.wallet, + args, + ) + .await; } Sub::QueryFindValidator(QueryFindValidator(mut args)) => { let client = client.unwrap_or_else(|| { @@ -511,11 +546,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_find_validator(&client, args).await; + rpc::query_find_validator::<_, IO>(&client, args).await; } Sub::QueryResult(QueryResult(mut args)) => { let client = client.unwrap_or_else(|| { @@ -524,11 +559,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - 
rpc::query_result(&client, args).await; + rpc::query_result::<_, IO>(&client, args).await; } Sub::QueryRawBytes(QueryRawBytes(mut args)) => { let client = client.unwrap_or_else(|| { @@ -537,13 +572,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_raw_bytes(&client, args).await; + rpc::query_raw_bytes::<_, IO>(&client, args).await; } - Sub::QueryProposal(QueryProposal(mut args)) => { let client = client.unwrap_or_else(|| { C::from_tendermint_address( @@ -551,11 +585,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_proposal(&client, args).await; + rpc::query_proposal::<_, IO>(&client, args).await; } Sub::QueryProposalResult(QueryProposalResult(mut args)) => { let client = client.unwrap_or_else(|| { @@ -564,11 +598,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_proposal_result(&client, args).await; + rpc::query_proposal_result::<_, IO>(&client, args) + .await; } Sub::QueryProtocolParameters(QueryProtocolParameters( mut args, @@ -579,11 +614,12 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_protocol_parameters(&client, args).await; + rpc::query_protocol_parameters::<_, IO>(&client, args) + .await; } Sub::QueryPgf(QueryPgf(mut args)) => { let client = client.unwrap_or_else(|| { @@ -592,11 +628,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_pgf(&client, args).await; + rpc::query_pgf::<_, IO>(&client, args).await; } 
Sub::QueryAccount(QueryAccount(mut args)) => { let client = client.unwrap_or_else(|| { @@ -605,11 +641,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::query_account(&client, args).await; + rpc::query_account::<_, IO>(&client, args).await; } Sub::SignTx(SignTx(mut args)) => { let client = client.unwrap_or_else(|| { @@ -618,11 +654,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - tx::sign_tx(&client, &mut ctx, args).await?; + tx::sign_tx::<_, IO>(&client, &mut ctx, args).await?; } } } @@ -647,17 +683,17 @@ impl CliApi { utils::default_base_dir(global_args, args) } Utils::EpochSleep(EpochSleep(args)) => { - let mut ctx = cli::Context::new(global_args) + let mut ctx = cli::Context::new::(global_args) .expect("expected to construct a context"); let mut ledger_address = args.ledger_address.clone(); let client = C::from_tendermint_address(&mut ledger_address); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - rpc::epoch_sleep(&client, args).await; + rpc::epoch_sleep::<_, IO>(&client, args).await; } }, } diff --git a/apps/src/lib/cli/context.rs b/apps/src/lib/cli/context.rs index 14a75ca4605..4aac8b1026f 100644 --- a/apps/src/lib/cli/context.rs +++ b/apps/src/lib/cli/context.rs @@ -6,11 +6,12 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use color_eyre::eyre::Result; -use namada::ledger::masp::ShieldedContext; -use namada::ledger::wallet::Wallet; +use namada::sdk::masp::ShieldedContext; +use namada::sdk::wallet::Wallet; use namada::types::address::{Address, InternalAddress}; use namada::types::chain::ChainId; use namada::types::ethereum_events::EthAddress; +use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::*; 
@@ -83,7 +84,7 @@ pub struct Context { } impl Context { - pub fn new(global_args: args::Global) -> Result { + pub fn new(global_args: args::Global) -> Result { let global_config = read_or_try_new_global_config(&global_args); tracing::debug!("Chain ID: {}", global_config.default_chain_id); @@ -144,7 +145,7 @@ impl Context { wallet, global_config, config, - shielded: CLIShieldedUtils::new(chain_dir), + shielded: CLIShieldedUtils::new::(chain_dir), native_token, }) } diff --git a/apps/src/lib/cli/relayer.rs b/apps/src/lib/cli/relayer.rs index 242a9ff061b..3322e84e2fb 100644 --- a/apps/src/lib/cli/relayer.rs +++ b/apps/src/lib/cli/relayer.rs @@ -4,6 +4,7 @@ use color_eyre::eyre::{eyre, Report, Result}; use namada::eth_bridge::ethers::providers::{Http, Provider}; use namada::ledger::eth_bridge::{bridge_pool, validator_set}; use namada::types::control_flow::ProceedOrElse; +use namada::types::io::Io; use crate::cli; use crate::cli::api::{CliApi, CliClient}; @@ -14,7 +15,7 @@ fn error() -> Report { eyre!("Fatal error") } -impl CliApi { +impl CliApi { pub async fn handle_relayer_command( client: Option, cmd: cli::NamadaRelayer, @@ -35,11 +36,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk(&mut ctx); - bridge_pool::recommend_batch(&client, args) + bridge_pool::recommend_batch::<_, IO>(&client, args) .await .proceed_or_else(error)?; } @@ -55,11 +56,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk_ctxless(); - bridge_pool::construct_proof(&client, args) + bridge_pool::construct_proof::<_, IO>(&client, args) .await .proceed_or_else(error)?; } @@ -70,7 +71,7 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let eth_client = Arc::new( @@ -78,7 +79,7 @@ impl CliApi { .unwrap(), ); let args = 
args.to_sdk_ctxless(); - bridge_pool::relay_bridge_pool_proof( + bridge_pool::relay_bridge_pool_proof::<_, _, IO>( eth_client, &client, args, ) .await @@ -91,10 +92,10 @@ impl CliApi { C::from_tendermint_address(&mut query.ledger_address) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; - bridge_pool::query_bridge_pool(&client).await; + bridge_pool::query_bridge_pool::<_, IO>(&client).await; } EthBridgePoolWithoutCtx::QuerySigned( QuerySignedBridgePool(mut query), @@ -103,10 +104,10 @@ impl CliApi { C::from_tendermint_address(&mut query.ledger_address) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; - bridge_pool::query_signed_bridge_pool(&client) + bridge_pool::query_signed_bridge_pool::<_, IO>(&client) .await .proceed_or_else(error)?; } @@ -117,14 +118,14 @@ impl CliApi { C::from_tendermint_address(&mut query.ledger_address) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; - bridge_pool::query_relay_progress(&client).await; + bridge_pool::query_relay_progress::<_, IO>(&client).await; } }, cli::NamadaRelayer::ValidatorSet(sub) => match sub { - ValidatorSet::ConsensusValidatorSet(ConsensusValidatorSet( + ValidatorSet::BridgeValidatorSet(BridgeValidatorSet( mut args, )) => { let client = client.unwrap_or_else(|| { @@ -133,12 +134,32 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk_ctxless(); - validator_set::query_validator_set_args(&client, args) - .await; + validator_set::query_bridge_validator_set::<_, IO>( + &client, args, + ) + .await; + } + ValidatorSet::GovernanceValidatorSet( + GovernanceValidatorSet(mut args), + ) => { + let client = client.unwrap_or_else(|| { + C::from_tendermint_address( + &mut args.query.ledger_address, + ) + }); + client + .wait_until_node_is_synced::() + .await 
+ .proceed_or_else(error)?; + let args = args.to_sdk_ctxless(); + validator_set::query_governnace_validator_set::<_, IO>( + &client, args, + ) + .await; } ValidatorSet::ValidatorSetProof(ValidatorSetProof( mut args, @@ -149,11 +170,11 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let args = args.to_sdk_ctxless(); - validator_set::query_validator_set_update_proof( + validator_set::query_validator_set_update_proof::<_, IO>( &client, args, ) .await; @@ -167,7 +188,7 @@ impl CliApi { ) }); client - .wait_until_node_is_synced() + .wait_until_node_is_synced::() .await .proceed_or_else(error)?; let eth_client = Arc::new( @@ -175,7 +196,7 @@ impl CliApi { .unwrap(), ); let args = args.to_sdk_ctxless(); - validator_set::relay_validator_set_update( + validator_set::relay_validator_set_update::<_, _, IO>( eth_client, &client, args, ) .await diff --git a/apps/src/lib/cli/utils.rs b/apps/src/lib/cli/utils.rs index aed45507d90..26cc38ff7fb 100644 --- a/apps/src/lib/cli/utils.rs +++ b/apps/src/lib/cli/utils.rs @@ -6,10 +6,10 @@ use std::str::FromStr; use clap::{ArgAction, ArgMatches}; use color_eyre::eyre::Result; -use lazy_static::lazy_static; use super::args; use super::context::{Context, FromContext}; +use crate::cli::api::CliIo; // We only use static strings pub type App = clap::Command; @@ -24,7 +24,7 @@ pub trait Cmd: Sized { match Self::parse(&matches) { Some(cmd) => { let global_args = args::Global::parse(&matches); - let context = Context::new(global_args)?; + let context = Context::new::(global_args)?; Ok((cmd, context)) } None => { @@ -362,53 +362,3 @@ pub fn safe_exit(_: i32) -> ! { panic!("Test failed because the client exited unexpectedly.") } - -lazy_static! { - /// A replacement for stdin in testing. 
- pub static ref TESTIN: std::sync::Arc>> = - std::sync::Arc::new(std::sync::Mutex::new(vec![])); -} - -/// A generic function for displaying a prompt to users and reading -/// in their response. -fn prompt_aux(mut reader: R, mut writer: W, question: &str) -> String -where - R: std::io::Read, - W: Write, -{ - write!(&mut writer, "{}", question).expect("Unable to write"); - writer.flush().unwrap(); - let mut s = String::new(); - reader.read_to_string(&mut s).expect("Unable to read"); - s -} - -/// A function that chooses how to dispatch prompts -/// to users. There is a hierarchy of feature flags -/// that determines this. If no flags are set, -/// the question is printed to stdout and response -/// read from stdin. -pub fn dispatch_prompt(question: impl AsRef) -> String { - if cfg!(feature = "testing") { - prompt_aux( - TESTIN.lock().unwrap().as_slice(), - std::io::stdout(), - question.as_ref(), - ) - } else { - prompt_aux( - std::io::stdin().lock(), - std::io::stdout(), - question.as_ref(), - ) - } -} - -#[macro_export] -/// A convenience macro for formatting the user prompt before -/// forwarding it to the `[dispatch_prompt]` method. -macro_rules! 
prompt { - ($($arg:tt)*) => {{ - $crate::cli::dispatch_prompt(format!("{}", format_args!($($arg)*))) - }} -} diff --git a/apps/src/lib/cli/wallet.rs b/apps/src/lib/cli/wallet.rs index 7505c59efe3..d039c7ef10f 100644 --- a/apps/src/lib/cli/wallet.rs +++ b/apps/src/lib/cli/wallet.rs @@ -7,10 +7,12 @@ use borsh::BorshSerialize; use color_eyre::eyre::Result; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::ledger::masp::find_valid_diversifier; -use namada::ledger::wallet::{DecryptionError, FindKeyError}; +use namada::sdk::masp::find_valid_diversifier; +use namada::sdk::wallet::{DecryptionError, FindKeyError}; +use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::{MaspValue, PaymentAddress}; +use namada::{display, display_line, edisplay_line}; use rand_core::OsRng; use crate::cli; @@ -19,7 +21,7 @@ use crate::cli::args::CliToSdk; use crate::cli::{args, cmds, Context}; use crate::wallet::{read_and_confirm_encryption_password, CliWalletUtils}; -impl CliApi { +impl CliApi { pub fn handle_wallet_command( cmd: cmds::NamadaWallet, mut ctx: Context, @@ -27,57 +29,57 @@ impl CliApi { match cmd { cmds::NamadaWallet::Key(sub) => match sub { cmds::WalletKey::Restore(cmds::KeyRestore(args)) => { - key_and_address_restore(ctx, args) + key_and_address_restore::(ctx, args) } cmds::WalletKey::Gen(cmds::KeyGen(args)) => { - key_and_address_gen(ctx, args) + key_and_address_gen::(ctx, args) } cmds::WalletKey::Find(cmds::KeyFind(args)) => { - key_find(ctx, args) + key_find::(ctx, args) } cmds::WalletKey::List(cmds::KeyList(args)) => { - key_list(ctx, args) + key_list::(ctx, args) } cmds::WalletKey::Export(cmds::Export(args)) => { - key_export(ctx, args) + key_export::(ctx, args) } }, cmds::NamadaWallet::Address(sub) => match sub { cmds::WalletAddress::Gen(cmds::AddressGen(args)) => { - key_and_address_gen(ctx, args) + key_and_address_gen::(ctx, args) } cmds::WalletAddress::Restore(cmds::AddressRestore(args)) => { - 
key_and_address_restore(ctx, args) + key_and_address_restore::(ctx, args) } cmds::WalletAddress::Find(cmds::AddressOrAliasFind(args)) => { - address_or_alias_find(ctx, args) + address_or_alias_find::(ctx, args) } cmds::WalletAddress::List(cmds::AddressList) => { - address_list(ctx) + address_list::(ctx) } cmds::WalletAddress::Add(cmds::AddressAdd(args)) => { - address_add(ctx, args) + address_add::(ctx, args) } }, cmds::NamadaWallet::Masp(sub) => match sub { cmds::WalletMasp::GenSpendKey(cmds::MaspGenSpendKey(args)) => { - spending_key_gen(ctx, args) + spending_key_gen::(ctx, args) } cmds::WalletMasp::GenPayAddr(cmds::MaspGenPayAddr(args)) => { let args = args.to_sdk(&mut ctx); - payment_address_gen(ctx, args) + payment_address_gen::(ctx, args) } cmds::WalletMasp::AddAddrKey(cmds::MaspAddAddrKey(args)) => { - address_key_add(ctx, args) + address_key_add::(ctx, args) } cmds::WalletMasp::ListPayAddrs(cmds::MaspListPayAddrs) => { - payment_addresses_list(ctx) + payment_addresses_list::(ctx) } cmds::WalletMasp::ListKeys(cmds::MaspListKeys(args)) => { - spending_keys_list(ctx, args) + spending_keys_list::(ctx, args) } cmds::WalletMasp::FindAddrKey(cmds::MaspFindAddrKey(args)) => { - address_key_find(ctx, args) + address_key_find::(ctx, args) } }, } @@ -86,7 +88,7 @@ impl CliApi { } /// Find shielded address or key -fn address_key_find( +fn address_key_find( ctx: Context, args::AddrKeyFind { alias, @@ -97,21 +99,24 @@ fn address_key_find( let alias = alias.to_lowercase(); if let Ok(viewing_key) = wallet.find_viewing_key(&alias) { // Check if alias is a viewing key - println!("Viewing key: {}", viewing_key); + display_line!(IO, "Viewing key: {}", viewing_key); if unsafe_show_secret { // Check if alias is also a spending key match wallet.find_spending_key(&alias, None) { - Ok(spending_key) => println!("Spending key: {}", spending_key), + Ok(spending_key) => { + display_line!(IO, "Spending key: {}", spending_key) + } Err(FindKeyError::KeyNotFound) => {} - Err(err) => 
eprintln!("{}", err), + Err(err) => edisplay_line!(IO, "{}", err), } } } else if let Some(payment_addr) = wallet.find_payment_addr(&alias) { // Failing that, check if alias is a payment address - println!("Payment address: {}", payment_addr); + display_line!(IO, "Payment address: {}", payment_addr); } else { // Otherwise alias cannot be referring to any shielded value - println!( + display_line!( + IO, "No shielded address or key with alias {} found. Use the commands \ `masp list-addrs` and `masp list-keys` to see all the known \ addresses and keys.", @@ -121,7 +126,7 @@ fn address_key_find( } /// List spending keys. -fn spending_keys_list( +fn spending_keys_list( ctx: Context, args::MaspKeysList { decrypt, @@ -132,32 +137,33 @@ fn spending_keys_list( let known_view_keys = wallet.get_viewing_keys(); let known_spend_keys = wallet.get_spending_keys(); if known_view_keys.is_empty() { - println!( + display_line!( + IO, "No known keys. Try `masp add --alias my-addr --value ...` to add \ - a new key to the wallet." 
+ a new key to the wallet.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - writeln!(w, "Known keys:").unwrap(); + display_line!(IO, &mut w; "Known keys:").unwrap(); for (alias, key) in known_view_keys { - write!(w, " Alias \"{}\"", alias).unwrap(); + display!(IO, &mut w; " Alias \"{}\"", alias).unwrap(); let spending_key_opt = known_spend_keys.get(&alias); // If this alias is associated with a spending key, indicate whether // or not the spending key is encrypted // TODO: consider turning if let into match if let Some(spending_key) = spending_key_opt { if spending_key.is_encrypted() { - writeln!(w, " (encrypted):") + display_line!(IO, &mut w; " (encrypted):") } else { - writeln!(w, " (not encrypted):") + display_line!(IO, &mut w; " (not encrypted):") } .unwrap(); } else { - writeln!(w, ":").unwrap(); + display_line!(IO, &mut w; ":").unwrap(); } // Always print the corresponding viewing key - writeln!(w, " Viewing Key: {}", key).unwrap(); + display_line!(IO, &mut w; " Viewing Key: {}", key).unwrap(); // A subset of viewing keys will have corresponding spending keys. // Print those too if they are available and requested. if unsafe_show_secret { @@ -166,8 +172,11 @@ fn spending_keys_list( // Here the spending key is unencrypted or successfully // decrypted Ok(spending_key) => { - writeln!(w, " Spending key: {}", spending_key) - .unwrap(); + display_line!(IO, + &mut w; + " Spending key: {}", spending_key, + ) + .unwrap(); } // Here the key is encrypted but decryption has not been // requested @@ -177,10 +186,10 @@ fn spending_keys_list( // Here the key is encrypted but incorrect password has // been provided Err(err) => { - writeln!( - w, - " Couldn't decrypt the spending key: {}", - err + display_line!(IO, + &mut w; + " Couldn't decrypt the spending key: {}", + err, ) .unwrap(); } @@ -192,26 +201,27 @@ fn spending_keys_list( } /// List payment addresses. 
-fn payment_addresses_list(ctx: Context) { +fn payment_addresses_list(ctx: Context) { let wallet = ctx.wallet; let known_addresses = wallet.get_payment_addrs(); if known_addresses.is_empty() { - println!( + display_line!( + IO, "No known payment addresses. Try `masp gen-addr --alias my-addr` \ - to generate a new payment address." + to generate a new payment address.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - writeln!(w, "Known payment addresses:").unwrap(); + display_line!(IO, &mut w; "Known payment addresses:").unwrap(); for (alias, address) in sorted(known_addresses) { - writeln!(w, " \"{}\": {}", alias, address).unwrap(); + display_line!(IO, &mut w; " \"{}\": {}", alias, address).unwrap(); } } } /// Generate a spending key. -fn spending_key_gen( +fn spending_key_gen( ctx: Context, args::MaspSpendKeyGen { alias, @@ -224,14 +234,15 @@ fn spending_key_gen( let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); let (alias, _key) = wallet.gen_spending_key(alias, password, alias_force); crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); - println!( + display_line!( + IO, "Successfully added a spending key with alias: \"{}\"", alias ); } /// Generate a shielded payment address from the given key. -fn payment_address_gen( +fn payment_address_gen( ctx: Context, args::MaspPayAddrGen { alias, @@ -254,18 +265,19 @@ fn payment_address_gen( alias_force, ) .unwrap_or_else(|| { - eprintln!("Payment address not added"); + edisplay_line!(IO, "Payment address not added"); cli::safe_exit(1); }); crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); - println!( + display_line!( + IO, "Successfully generated a payment address with the following alias: {}", alias, ); } /// Add a viewing key, spending key, or payment address to wallet. 
-fn address_key_add( +fn address_key_add( mut ctx: Context, args::MaspAddrKeyAdd { alias, @@ -281,7 +293,7 @@ fn address_key_add( .wallet .insert_viewing_key(alias, viewing_key, alias_force) .unwrap_or_else(|| { - eprintln!("Viewing key not added"); + edisplay_line!(IO, "Viewing key not added"); cli::safe_exit(1); }); (alias, "viewing key") @@ -298,7 +310,7 @@ fn address_key_add( alias_force, ) .unwrap_or_else(|| { - eprintln!("Spending key not added"); + edisplay_line!(IO, "Spending key not added"); cli::safe_exit(1); }); (alias, "spending key") @@ -308,22 +320,24 @@ fn address_key_add( .wallet .insert_payment_addr(alias, payment_addr, alias_force) .unwrap_or_else(|| { - eprintln!("Payment address not added"); + edisplay_line!(IO, "Payment address not added"); cli::safe_exit(1); }); (alias, "payment address") } }; crate::wallet::save(&ctx.wallet).unwrap_or_else(|err| eprintln!("{}", err)); - println!( + display_line!( + IO, "Successfully added a {} with the following alias to wallet: {}", - typ, alias, + typ, + alias, ); } /// Restore a keypair and an implicit address from the mnemonic code in the /// wallet. -fn key_and_address_restore( +fn key_and_address_restore( ctx: Context, args::KeyAndAddressRestore { scheme, @@ -345,15 +359,17 @@ fn key_and_address_restore( encryption_password, ) .unwrap_or_else(|err| { - eprintln!("{}", err); + edisplay_line!(IO, "{}", err); cli::safe_exit(1) }) .unwrap_or_else(|| { - println!("No changes are persisted. Exiting."); + display_line!(IO, "No changes are persisted. Exiting."); cli::safe_exit(0); }); - crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); - println!( + crate::wallet::save(&wallet) + .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); + display_line!( + IO, "Successfully added a key and an address with alias: \"{}\"", alias ); @@ -361,7 +377,7 @@ fn key_and_address_restore( /// Generate a new keypair and derive implicit address from it and store them in /// the wallet. 
-fn key_and_address_gen( +fn key_and_address_gen( ctx: Context, args::KeyAndAddressGen { scheme, @@ -386,22 +402,24 @@ fn key_and_address_gen( derivation_path_and_mnemonic_rng, ) .unwrap_or_else(|err| { - eprintln!("{}", err); + edisplay_line!(IO, "{}", err); cli::safe_exit(1); }) .unwrap_or_else(|| { - println!("No changes are persisted. Exiting."); + display_line!(IO, "No changes are persisted. Exiting."); cli::safe_exit(0); }); - crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); - println!( + crate::wallet::save(&wallet) + .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); + display_line!( + IO, "Successfully added a key and an address with alias: \"{}\"", alias ); } /// Find a keypair in the wallet store. -fn key_find( +fn key_find( ctx: Context, args::KeyFind { public_key, @@ -417,9 +435,10 @@ fn key_find( let alias = alias.or(value); match alias { None => { - eprintln!( + edisplay_line!( + IO, "An alias, public key or public key hash needs to be \ - supplied" + supplied", ); cli::safe_exit(1) } @@ -430,20 +449,20 @@ fn key_find( match found_keypair { Ok(keypair) => { let pkh: PublicKeyHash = (&keypair.ref_to()).into(); - println!("Public key hash: {}", pkh); - println!("Public key: {}", keypair.ref_to()); + display_line!(IO, "Public key hash: {}", pkh); + display_line!(IO, "Public key: {}", keypair.ref_to()); if unsafe_show_secret { - println!("Secret key: {}", keypair); + display_line!(IO, "Secret key: {}", keypair); } } Err(err) => { - eprintln!("{}", err); + edisplay_line!(IO, "{}", err); } } } /// List all known keys. -fn key_list( +fn key_list( ctx: Context, args::KeyList { decrypt, @@ -453,38 +472,54 @@ fn key_list( let wallet = ctx.wallet; let known_keys = wallet.get_keys(); if known_keys.is_empty() { - println!( + display_line!( + IO, "No known keys. Try `key gen --alias my-key` to generate a new \ - key." 
+ key.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - writeln!(w, "Known keys:").unwrap(); + display_line!(IO, &mut w; "Known keys:").unwrap(); for (alias, (stored_keypair, pkh)) in known_keys { let encrypted = if stored_keypair.is_encrypted() { "encrypted" } else { "not encrypted" }; - writeln!(w, " Alias \"{}\" ({}):", alias, encrypted).unwrap(); + display_line!(IO, + &mut w; + " Alias \"{}\" ({}):", alias, encrypted, + ) + .unwrap(); if let Some(pkh) = pkh { - writeln!(w, " Public key hash: {}", pkh).unwrap(); + display_line!(IO, &mut w; " Public key hash: {}", pkh) + .unwrap(); } match stored_keypair.get::(decrypt, None) { Ok(keypair) => { - writeln!(w, " Public key: {}", keypair.ref_to()) - .unwrap(); + display_line!(IO, + &mut w; + " Public key: {}", keypair.ref_to(), + ) + .unwrap(); if unsafe_show_secret { - writeln!(w, " Secret key: {}", keypair).unwrap(); + display_line!(IO, + &mut w; + " Secret key: {}", keypair, + ) + .unwrap(); } } Err(DecryptionError::NotDecrypting) if !decrypt => { continue; } Err(err) => { - writeln!(w, " Couldn't decrypt the keypair: {}", err) - .unwrap(); + display_line!(IO, + &mut w; + " Couldn't decrypt the keypair: {}", err, + ) + .unwrap(); } } } @@ -492,7 +527,10 @@ fn key_list( } /// Export a keypair to a file. -fn key_export(ctx: Context, args::KeyExport { alias }: args::KeyExport) { +fn key_export( + ctx: Context, + args::KeyExport { alias }: args::KeyExport, +) { let mut wallet = ctx.wallet; wallet .find_key(alias.to_lowercase(), None) @@ -504,36 +542,40 @@ fn key_export(ctx: Context, args::KeyExport { alias }: args::KeyExport) { let mut file = File::create(&file_name).unwrap(); file.write_all(file_data.as_ref()).unwrap(); - println!("Exported to file {}", file_name); + display_line!(IO, "Exported to file {}", file_name); }) .unwrap_or_else(|err| { - eprintln!("{}", err); + edisplay_line!(IO, "{}", err); cli::safe_exit(1) }) } /// List all known addresses. 
-fn address_list(ctx: Context) { +fn address_list(ctx: Context) { let wallet = ctx.wallet; let known_addresses = wallet.get_addresses(); if known_addresses.is_empty() { - println!( + display_line!( + IO, "No known addresses. Try `address gen --alias my-addr` to \ - generate a new implicit address." + generate a new implicit address.", ); } else { let stdout = io::stdout(); let mut w = stdout.lock(); - writeln!(w, "Known addresses:").unwrap(); + display_line!(IO, &mut w; "Known addresses:").unwrap(); for (alias, address) in sorted(known_addresses) { - writeln!(w, " \"{}\": {}", alias, address.to_pretty_string()) - .unwrap(); + display_line!(IO, + &mut w; + " \"{}\": {}", alias, address.to_pretty_string(), + ) + .unwrap(); } } } /// Find address (alias) by its alias (address). -fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { +fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { let wallet = ctx.wallet; if args.address.is_some() && args.alias.is_some() { panic!( @@ -543,9 +585,10 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { } else if args.alias.is_some() { if let Some(address) = wallet.find_address(args.alias.as_ref().unwrap()) { - println!("Found address {}", address.to_pretty_string()); + display_line!(IO, "Found address {}", address.to_pretty_string()); } else { - println!( + display_line!( + IO, "No address with alias {} found. Use the command `address \ list` to see all the known addresses.", args.alias.unwrap().to_lowercase() @@ -553,9 +596,10 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { } } else if args.address.is_some() { if let Some(alias) = wallet.find_alias(args.address.as_ref().unwrap()) { - println!("Found alias {}", alias); + display_line!(IO, "Found alias {}", alias); } else { - println!( + display_line!( + IO, "No alias with address {} found. 
Use the command `address \ list` to see all the known addresses.", args.address.unwrap() @@ -565,7 +609,7 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { } /// Add an address to the wallet. -fn address_add(ctx: Context, args: args::AddressAdd) { +fn address_add(ctx: Context, args: args::AddressAdd) { let mut wallet = ctx.wallet; if wallet .add_address( @@ -575,11 +619,13 @@ fn address_add(ctx: Context, args: args::AddressAdd) { ) .is_none() { - eprintln!("Address not added"); + edisplay_line!(IO, "Address not added"); cli::safe_exit(1); } - crate::wallet::save(&wallet).unwrap_or_else(|err| eprintln!("{}", err)); - println!( + crate::wallet::save(&wallet) + .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); + display_line!( + IO, "Successfully added a key and an address with alias: \"{}\"", args.alias.to_lowercase() ); diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index b1b46ef7fa1..d750ccc7590 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -3,7 +3,7 @@ use std::cmp::Ordering; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::fs::{self, read_dir}; -use std::io::{self, Write}; +use std::io; use std::iter::Iterator; use std::str::FromStr; @@ -29,46 +29,51 @@ use namada::core::ledger::governance::utils::{ use namada::core::ledger::pgf::parameters::PgfParameters; use namada::core::ledger::pgf::storage::steward::StewardDetail; use namada::ledger::events::Event; -use namada::ledger::masp::{ - Conversions, MaspAmount, MaspChange, ShieldedContext, ShieldedUtils, -}; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; use namada::ledger::pos::{CommissionPair, PosParams, Slash}; use namada::ledger::queries::RPC; -use namada::ledger::rpc::{ +use namada::ledger::storage::ConversionState; +use namada::proof_of_stake::types::{ValidatorState, WeightedValidator}; +use namada::sdk::error; +use namada::sdk::error::{is_pinned_error, Error, PinnedBalanceError}; 
+use namada::sdk::masp::{ + Conversions, MaspAmount, MaspChange, ShieldedContext, ShieldedUtils, +}; +use namada::sdk::rpc::{ self, enriched_bonds_and_unbonds, format_denominated_amount, query_epoch, TxResponse, }; -use namada::ledger::storage::ConversionState; -use namada::ledger::wallet::{AddressVpType, Wallet}; -use namada::proof_of_stake::types::{ValidatorState, WeightedValidator}; +use namada::sdk::wallet::{AddressVpType, Wallet}; use namada::types::address::{masp, Address}; use namada::types::control_flow::ProceedOrElse; -use namada::types::error::{is_pinned_error, Error, PinnedBalanceError}; use namada::types::hash::Hash; +use namada::types::io::Io; use namada::types::key::*; use namada::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; use namada::types::storage::{BlockHeight, BlockResults, Epoch, Key, KeySeg}; use namada::types::token::{Change, MaspDenom}; -use namada::types::{error, storage, token}; +use namada::types::{storage, token}; +use namada::{display, display_line, edisplay_line, prompt}; use tokio::time::Instant; use crate::cli::{self, args}; use crate::facade::tendermint::merkle::proof::Proof; use crate::facade::tendermint_rpc::error::Error as TError; -use crate::prompt; use crate::wallet::CliWalletUtils; /// Query the status of a given transaction. /// /// If a response is not delivered until `deadline`, we exit the cli with an /// error. 
-pub async fn query_tx_status( +pub async fn query_tx_status< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, - status: namada::ledger::rpc::TxEventQuery<'_>, + status: namada::sdk::rpc::TxEventQuery<'_>, deadline: Instant, ) -> Event { - namada::ledger::rpc::query_tx_status(client, status, deadline) + rpc::query_tx_status::<_, IO>(client, status, deadline) .await .proceed() } @@ -76,28 +81,32 @@ pub async fn query_tx_status( /// Query and print the epoch of the last committed block pub async fn query_and_print_epoch< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, ) -> Epoch { - let epoch = namada::ledger::rpc::query_epoch(client).await.unwrap(); - println!("Last committed epoch: {}", epoch); + let epoch = rpc::query_epoch(client).await.unwrap(); + display_line!(IO, "Last committed epoch: {}", epoch); epoch } /// Query the last committed block -pub async fn query_block( +pub async fn query_block( client: &C, ) { - let block = namada::ledger::rpc::query_block(client).await.unwrap(); + let block = namada::sdk::rpc::query_block(client).await.unwrap(); match block { Some(block) => { - println!( + display_line!( + IO, "Last committed block ID: {}, height: {}, time: {}", - block.hash, block.height, block.time + block.hash, + block.height, + block.time ); } None => { - println!("No block has been committed yet."); + display_line!(IO, "No block has been committed yet."); } } } @@ -116,6 +125,7 @@ pub async fn query_results( pub async fn query_transfers< C: namada::ledger::queries::Client + Sync, U: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -163,7 +173,7 @@ pub async fn query_transfers< // Realize the rewards that would have been attained upon the // transaction's reception let amt = shielded - .compute_exchanged_amount( + .compute_exchanged_amount::<_, IO>( client, amt, epoch, @@ -194,33 +204,43 @@ pub async fn query_transfers< if !relevant { continue; } - println!("Height: {}, Index: {}, Transparent 
Transfer:", height, idx); + display_line!( + IO, + "Height: {}, Index: {}, Transparent Transfer:", + height, + idx + ); // Display the transparent changes first for (account, MaspChange { ref asset, change }) in tfer_delta { if account != masp() { - print!(" {}:", account); + display!(IO, " {}:", account); let token_alias = wallet.lookup_alias(asset); let sign = match change.cmp(&Change::zero()) { Ordering::Greater => "+", Ordering::Less => "-", Ordering::Equal => "", }; - print!( + display!( + IO, " {}{} {}", sign, - format_denominated_amount(client, asset, change.into(),) - .await, + format_denominated_amount::<_, IO>( + client, + asset, + change.into(), + ) + .await, token_alias ); } - println!(); + display_line!(IO, ""); } // Then display the shielded changes afterwards // TODO: turn this to a display impl // (account, amt) for (account, masp_change) in shielded_accounts { if fvk_map.contains_key(&account) { - print!(" {}:", fvk_map[&account]); + display!(IO, " {}:", fvk_map[&account]); for (token_addr, val) in masp_change { let token_alias = wallet.lookup_alias(&token_addr); let sign = match val.cmp(&Change::zero()) { @@ -228,10 +248,11 @@ pub async fn query_transfers< Ordering::Less => "-", Ordering::Equal => "", }; - print!( + display!( + IO, " {}{} {}", sign, - format_denominated_amount( + format_denominated_amount::<_, IO>( client, &token_addr, val.into(), @@ -240,14 +261,17 @@ pub async fn query_transfers< token_alias, ); } - println!(); + display_line!(IO, ""); } } } } /// Query the raw bytes of given storage key -pub async fn query_raw_bytes( +pub async fn query_raw_bytes< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, args: args::QueryRawBytes, ) { @@ -257,9 +281,9 @@ pub async fn query_raw_bytes( .await, ); if !response.data.is_empty() { - println!("Found data: 0x{}", HEXLOWER.encode(&response.data)); + display_line!(IO, "Found data: 0x{}", HEXLOWER.encode(&response.data)); } else { - println!("No data found for key {}", 
args.storage_key); + display_line!(IO, "No data found for key {}", args.storage_key); } } @@ -267,6 +291,7 @@ pub async fn query_raw_bytes( pub async fn query_balance< C: namada::ledger::queries::Client + Sync, U: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -277,22 +302,35 @@ pub async fn query_balance< // the CLI arguments match &args.owner { Some(BalanceOwner::FullViewingKey(_viewing_key)) => { - query_shielded_balance(client, wallet, shielded, args).await + query_shielded_balance::<_, _, IO>(client, wallet, shielded, args) + .await } Some(BalanceOwner::Address(_owner)) => { - query_transparent_balance(client, wallet, args).await + query_transparent_balance::<_, IO>(client, wallet, args).await } Some(BalanceOwner::PaymentAddress(_owner)) => { - query_pinned_balance(client, wallet, shielded, args).await + query_pinned_balance::<_, _, IO>(client, wallet, shielded, args) + .await } None => { // Print pinned balance - query_pinned_balance(client, wallet, shielded, args.clone()).await; + query_pinned_balance::<_, _, IO>( + client, + wallet, + shielded, + args.clone(), + ) + .await; // Print shielded balance - query_shielded_balance(client, wallet, shielded, args.clone()) - .await; + query_shielded_balance::<_, _, IO>( + client, + wallet, + shielded, + args.clone(), + ) + .await; // Then print transparent balance - query_transparent_balance(client, wallet, args).await; + query_transparent_balance::<_, IO>(client, wallet, args).await; } }; } @@ -300,6 +338,7 @@ pub async fn query_balance< /// Query token balance(s) pub async fn query_transparent_balance< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -319,14 +358,20 @@ pub async fn query_transparent_balance< .await { Ok(balance) => { - let balance = - format_denominated_amount(client, &token, balance) - .await; - println!("{}: {}", token_alias, balance); + let balance = format_denominated_amount::<_, IO>( + client, &token, balance, + ) + .await; + 
display_line!(IO, "{}: {}", token_alias, balance); } Err(e) => { - println!("Eror in querying: {e}"); - println!("No {} balance found for {}", token_alias, owner) + display_line!(IO, "Eror in querying: {e}"); + display_line!( + IO, + "No {} balance found for {}", + token_alias, + owner + ) } } } @@ -335,27 +380,37 @@ pub async fn query_transparent_balance< for (token_alias, token) in tokens { let balance = get_token_balance(client, &token, &owner).await; if !balance.is_zero() { - let balance = - format_denominated_amount(client, &token, balance) - .await; - println!("{}: {}", token_alias, balance); + let balance = format_denominated_amount::<_, IO>( + client, &token, balance, + ) + .await; + display_line!(IO, "{}: {}", token_alias, balance); } } } (Some(token), None) => { let prefix = token::balance_prefix(&token); let balances = - query_storage_prefix::(client, &prefix).await; - if let Some(balances) = balances { - print_balances(client, wallet, balances, Some(&token), None) + query_storage_prefix::(client, &prefix) .await; + if let Some(balances) = balances { + print_balances::<_, IO>( + client, + wallet, + balances, + Some(&token), + None, + ) + .await; } } (None, None) => { let balances = - query_storage_prefix::(client, &prefix).await; + query_storage_prefix::(client, &prefix) + .await; if let Some(balances) = balances { - print_balances(client, wallet, balances, None, None).await; + print_balances::<_, IO>(client, wallet, balances, None, None) + .await; } } } @@ -365,6 +420,7 @@ pub async fn query_transparent_balance< pub async fn query_pinned_balance< C: namada::ledger::queries::Client + Sync, U: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -398,7 +454,7 @@ pub async fn query_pinned_balance< // address for vk in &viewing_keys { balance = shielded - .compute_exchanged_pinned_balance(client, owner, vk) + .compute_exchanged_pinned_balance::<_, IO>(client, owner, vk) .await; if !is_pinned_error(&balance) { break; @@ -406,25 +462,27 @@ pub 
async fn query_pinned_balance< } // If a suitable viewing key was not found, then demand it from the user if is_pinned_error(&balance) { - let vk_str = prompt!("Enter the viewing key for {}: ", owner); + let vk_str = + prompt!(IO, "Enter the viewing key for {}: ", owner).await; let fvk = match ExtendedViewingKey::from_str(vk_str.trim()) { Ok(fvk) => fvk, _ => { - eprintln!("Invalid viewing key entered"); + edisplay_line!(IO, "Invalid viewing key entered"); continue; } }; let vk = ExtendedFullViewingKey::from(fvk).fvk.vk; // Use the given viewing key to decrypt pinned transaction data balance = shielded - .compute_exchanged_pinned_balance(client, owner, &vk) + .compute_exchanged_pinned_balance::<_, IO>(client, owner, &vk) .await } // Now print out the received quantities according to CLI arguments match (balance, args.token.as_ref()) { (Err(Error::Pinned(PinnedBalanceError::InvalidViewingKey)), _) => { - println!( + display_line!( + IO, "Supplied viewing key cannot decode transactions to given \ payment address." ) @@ -433,10 +491,14 @@ pub async fn query_pinned_balance< Err(Error::Pinned(PinnedBalanceError::NoTransactionPinned)), _, ) => { - println!("Payment address {} has not yet been consumed.", owner) + display_line!( + IO, + "Payment address {} has not yet been consumed.", + owner + ) } (Err(other), _) => { - println!("Error in Querying Pinned balance {}", other) + display_line!(IO, "Error in Querying Pinned balance {}", other) } (Ok((balance, epoch)), Some(token)) => { let token_alias = wallet.lookup_alias(token); @@ -447,22 +509,29 @@ pub async fn query_pinned_balance< .unwrap_or_default(); if total_balance.is_zero() { - println!( + display_line!( + IO, "Payment address {} was consumed during epoch {}. 
\ Received no shielded {}", - owner, epoch, token_alias + owner, + epoch, + token_alias ); } else { - let formatted = format_denominated_amount( + let formatted = format_denominated_amount::<_, IO>( client, token, total_balance.into(), ) .await; - println!( + display_line!( + IO, "Payment address {} was consumed during epoch {}. \ Received {} {}", - owner, epoch, formatted, token_alias, + owner, + epoch, + formatted, + token_alias, ); } } @@ -474,14 +543,16 @@ pub async fn query_pinned_balance< .filter(|((token_epoch, _), _)| *token_epoch == epoch) { if !found_any { - println!( + display_line!( + IO, "Payment address {} was consumed during epoch {}. \ Received:", - owner, epoch + owner, + epoch ); found_any = true; } - let formatted = format_denominated_amount( + let formatted = format_denominated_amount::<_, IO>( client, token_addr, (*value).into(), @@ -491,13 +562,15 @@ pub async fn query_pinned_balance< .get(token_addr) .map(|a| a.to_string()) .unwrap_or_else(|| token_addr.to_string()); - println!(" {}: {}", token_alias, formatted,); + display_line!(IO, " {}: {}", token_alias, formatted,); } if !found_any { - println!( + display_line!( + IO, "Payment address {} was consumed during epoch {}. 
\ Received no shielded assets.", - owner, epoch + owner, + epoch ); } } @@ -505,7 +578,7 @@ pub async fn query_pinned_balance< } } -async fn print_balances( +async fn print_balances( client: &C, wallet: &Wallet, balances: impl Iterator, @@ -526,7 +599,8 @@ async fn print_balances( owner.clone(), format!( ": {}, owned by {}", - format_denominated_amount(client, tok, balance).await, + format_denominated_amount::<_, IO>(client, tok, balance) + .await, wallet.lookup_alias(owner) ), ), @@ -555,45 +629,53 @@ async fn print_balances( } _ => { let token_alias = wallet.lookup_alias(&t); - writeln!(w, "Token {}", token_alias).unwrap(); + display_line!(IO, &mut w; "Token {}", token_alias).unwrap(); print_token = Some(t); } } // Print the balance - writeln!(w, "{}", s).unwrap(); + display_line!(IO, &mut w; "{}", s).unwrap(); print_num += 1; } if print_num == 0 { match (token, target) { - (Some(_), Some(target)) | (None, Some(target)) => writeln!( - w, + (Some(_), Some(target)) | (None, Some(target)) => display_line!( + IO, + &mut w; "No balances owned by {}", wallet.lookup_alias(target) ) .unwrap(), (Some(token), None) => { let token_alias = wallet.lookup_alias(token); - writeln!(w, "No balances for token {}", token_alias).unwrap() + display_line!(IO, &mut w; "No balances for token {}", token_alias).unwrap() } - (None, None) => writeln!(w, "No balances").unwrap(), + (None, None) => display_line!(IO, &mut w; "No balances").unwrap(), } } } /// Query Proposals -pub async fn query_proposal( +pub async fn query_proposal< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, args: args::QueryProposal, ) { - let current_epoch = query_and_print_epoch(client).await; + let current_epoch = query_and_print_epoch::<_, IO>(client).await; if let Some(id) = args.proposal_id { let proposal = query_proposal_by_id(client, id).await.unwrap(); if let Some(proposal) = proposal { - println!("{}", proposal.to_string_with_status(current_epoch)); + display_line!( + IO, + "{}", + 
proposal.to_string_with_status(current_epoch) + ); } else { - eprintln!("No proposal found with id: {}", id); + edisplay_line!(IO, "No proposal found with id: {}", id); } } else { let last_proposal_id_key = governance_storage::get_counter_key(); @@ -608,14 +690,14 @@ pub async fn query_proposal( 0 }; - println!("id: {}", last_proposal_id); + display_line!(IO, "id: {}", last_proposal_id); for id in from_id..last_proposal_id { let proposal = query_proposal_by_id(client, id) .await .unwrap() .expect("Proposal should be written to storage."); - println!("{}", proposal); + display_line!(IO, "{}", proposal); } } } @@ -625,13 +707,14 @@ pub async fn query_proposal_by_id( client: &C, proposal_id: u64, ) -> Result, error::Error> { - namada::ledger::rpc::query_proposal_by_id(client, proposal_id).await + namada::sdk::rpc::query_proposal_by_id(client, proposal_id).await } /// Query token shielded balance(s) pub async fn query_shielded_balance< C: namada::ledger::queries::Client + Sync, U: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -658,7 +741,7 @@ pub async fn query_shielded_balance< // Save the update state so that future fetches can be short-circuited let _ = shielded.save().await; // The epoch is required to identify timestamped tokens - let epoch = query_and_print_epoch(client).await; + let epoch = query_and_print_epoch::<_, IO>(client).await; // Map addresses to token names let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); match (args.token, owner.is_some()) { @@ -675,7 +758,11 @@ pub async fn query_shielded_balance< .expect("context should contain viewing key") } else { shielded - .compute_exchanged_balance(client, &viewing_key, epoch) + .compute_exchanged_balance::<_, IO>( + client, + &viewing_key, + epoch, + ) .await .unwrap() .expect("context should contain viewing key") @@ -688,15 +775,17 @@ pub async fn query_shielded_balance< .cloned() .unwrap_or_default(); if total_balance.is_zero() { - println!( + display_line!( + IO, "No 
shielded {} balance found for given key", token_alias ); } else { - println!( + display_line!( + IO, "{}: {}", token_alias, - format_denominated_amount( + format_denominated_amount::<_, IO>( client, &token, token::Amount::from(total_balance) @@ -720,7 +809,11 @@ pub async fn query_shielded_balance< .expect("context should contain viewing key") } else { shielded - .compute_exchanged_balance(client, &viewing_key, epoch) + .compute_exchanged_balance::<_, IO>( + client, + &viewing_key, + epoch, + ) .await .unwrap() .expect("context should contain viewing key") @@ -742,7 +835,8 @@ pub async fn query_shielded_balance< // remove this from here, should not be making the // hashtable creation any uglier if balances.is_empty() { - println!( + display_line!( + IO, "No shielded {} balance found for any wallet key", &token_addr ); @@ -758,14 +852,14 @@ pub async fn query_shielded_balance< .get(&token) .map(|a| a.to_string()) .unwrap_or_else(|| token.to_string()); - println!("Shielded Token {}:", alias); - let formatted = format_denominated_amount( + display_line!(IO, "Shielded Token {}:", alias); + let formatted = format_denominated_amount::<_, IO>( client, &token, token_balance.into(), ) .await; - println!(" {}, owned by {}", formatted, fvk); + display_line!(IO, " {}, owned by {}", formatted, fvk); } } // Here the user wants to know the balance for a specific token across @@ -781,10 +875,10 @@ pub async fn query_shielded_balance< ) .unwrap(); let token_alias = wallet.lookup_alias(&token); - println!("Shielded Token {}:", token_alias); + display_line!(IO, "Shielded Token {}:", token_alias); let mut found_any = false; let token_alias = wallet.lookup_alias(&token); - println!("Shielded Token {}:", token_alias,); + display_line!(IO, "Shielded Token {}:", token_alias,); for fvk in viewing_keys { // Query the multi-asset balance at the given spending key let viewing_key = ExtendedFullViewingKey::from(fvk).fvk.vk; @@ -796,7 +890,11 @@ pub async fn query_shielded_balance< 
.expect("context should contain viewing key") } else { shielded - .compute_exchanged_balance(client, &viewing_key, epoch) + .compute_exchanged_balance::<_, IO>( + client, + &viewing_key, + epoch, + ) .await .unwrap() .expect("context should contain viewing key") @@ -806,17 +904,18 @@ pub async fn query_shielded_balance< if !val.is_zero() { found_any = true; } - let formatted = format_denominated_amount( + let formatted = format_denominated_amount::<_, IO>( client, address, (*val).into(), ) .await; - println!(" {}, owned by {}", formatted, fvk); + display_line!(IO, " {}, owned by {}", formatted, fvk); } } if !found_any { - println!( + display_line!( + IO, "No shielded {} balance found for any wallet key", token_alias, ); @@ -834,15 +933,23 @@ pub async fn query_shielded_balance< .unwrap() .expect("context should contain viewing key"); // Print balances by human-readable token names - print_decoded_balance_with_epoch(client, wallet, balance).await; + print_decoded_balance_with_epoch::<_, IO>( + client, wallet, balance, + ) + .await; } else { let balance = shielded - .compute_exchanged_balance(client, &viewing_key, epoch) + .compute_exchanged_balance::<_, IO>( + client, + &viewing_key, + epoch, + ) .await .unwrap() .expect("context should contain viewing key"); // Print balances by human-readable token names - print_decoded_balance(client, wallet, balance, epoch).await; + print_decoded_balance::<_, IO>(client, wallet, balance, epoch) + .await; } } } @@ -850,6 +957,7 @@ pub async fn query_shielded_balance< pub async fn print_decoded_balance< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -857,17 +965,22 @@ pub async fn print_decoded_balance< epoch: Epoch, ) { if decoded_balance.is_empty() { - println!("No shielded balance found for given key"); + display_line!(IO, "No shielded balance found for given key"); } else { for ((_, token_addr), amount) in decoded_balance .iter() .filter(|((token_epoch, _), _)| *token_epoch == 
epoch) { - println!( + display_line!( + IO, "{} : {}", wallet.lookup_alias(token_addr), - format_denominated_amount(client, token_addr, (*amount).into()) - .await, + format_denominated_amount::<_, IO>( + client, + token_addr, + (*amount).into() + ) + .await, ); } } @@ -875,6 +988,7 @@ pub async fn print_decoded_balance< pub async fn print_decoded_balance_with_epoch< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -882,7 +996,7 @@ pub async fn print_decoded_balance_with_epoch< ) { let tokens = wallet.get_addresses_with_vp_type(AddressVpType::Token); if decoded_balance.is_empty() { - println!("No shielded balance found for given key"); + display_line!(IO, "No shielded balance found for given key"); } for ((epoch, token_addr), value) in decoded_balance.iter() { let asset_value = (*value).into(); @@ -890,11 +1004,13 @@ pub async fn print_decoded_balance_with_epoch< .get(token_addr) .map(|a| a.to_string()) .unwrap_or_else(|| token_addr.to_string()); - println!( + display_line!( + IO, "{} | {} : {}", alias, epoch, - format_denominated_amount(client, token_addr, asset_value).await, + format_denominated_amount::<_, IO>(client, token_addr, asset_value) + .await, ); } } @@ -905,13 +1021,14 @@ pub async fn get_token_balance( token: &Address, owner: &Address, ) -> token::Amount { - namada::ledger::rpc::get_token_balance(client, token, owner) + namada::sdk::rpc::get_token_balance(client, token, owner) .await .unwrap() } pub async fn query_proposal_result< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, args: args::QueryProposalResult, @@ -924,7 +1041,7 @@ pub async fn query_proposal_result< { proposal } else { - eprintln!("Proposal {} not found.", proposal_id); + edisplay_line!(IO, "Proposal {} not found.", proposal_id); return; }; @@ -946,8 +1063,8 @@ pub async fn query_proposal_result< let proposal_result = compute_proposal_result(votes, total_voting_power, tally_type); - println!("Proposal Id: {} ", 
proposal_id); - println!("{:4}{}", "", proposal_result); + display_line!(IO, "Proposal Id: {} ", proposal_id); + display_line!(IO, "{:4}{}", "", proposal_result); } else { let proposal_folder = args.proposal_folder.expect( "The argument --proposal-folder is required with --offline.", @@ -983,11 +1100,14 @@ pub async fn query_proposal_result< if proposal.is_ok() { proposal.unwrap() } else { - eprintln!("The offline proposal is not valid."); + edisplay_line!(IO, "The offline proposal is not valid."); return; } } else { - eprintln!("Couldn't find a file name offline_proposal_*.json."); + edisplay_line!( + IO, + "Couldn't find a file name offline_proposal_*.json." + ); return; }; @@ -1001,9 +1121,12 @@ pub async fn query_proposal_result< }) .collect::>(); - let proposal_votes = - compute_offline_proposal_votes(client, &proposal, votes.clone()) - .await; + let proposal_votes = compute_offline_proposal_votes::<_, IO>( + client, + &proposal, + votes.clone(), + ) + .await; let total_voting_power = get_total_staked_tokens(client, proposal.proposal.tally_epoch) .await; @@ -1014,30 +1137,33 @@ pub async fn query_proposal_result< TallyType::TwoThird, ); - println!("Proposal offline: {}", proposal.proposal.hash()); - println!("Parsed {} votes.", votes.len()); - println!("{:4}{}", "", proposal_result); + display_line!(IO, "Proposal offline: {}", proposal.proposal.hash()); + display_line!(IO, "Parsed {} votes.", votes.len()); + display_line!(IO, "{:4}{}", "", proposal_result); } } -pub async fn query_account( +pub async fn query_account< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, args: args::QueryAccount, ) { let account = rpc::get_account_info(client, &args.owner).await.unwrap(); if let Some(account) = account { - println!("Address: {}", account.address); - println!("Threshold: {}", account.threshold); - println!("Public keys:"); + display_line!(IO, "Address: {}", account.address); + display_line!(IO, "Threshold: {}", account.threshold); + 
display_line!(IO, "Public keys:"); for (public_key, _) in account.public_keys_map.pk_to_idx { - println!("- {}", public_key); + display_line!(IO, "- {}", public_key); } } else { - println!("No account exists for {}", args.owner); + display_line!(IO, "No account exists for {}", args.owner); } } -pub async fn query_pgf( +pub async fn query_pgf( client: &C, _args: args::QueryPgf, ) { @@ -1045,25 +1171,36 @@ pub async fn query_pgf( let fundings = query_pgf_fundings(client).await; match stewards.is_empty() { - true => println!("Pgf stewards: no stewards are currectly set."), + true => { + display_line!(IO, "Pgf stewards: no stewards are currectly set.") + } false => { - println!("Pgf stewards:"); + display_line!(IO, "Pgf stewards:"); for steward in stewards { - println!("{:4}- {}", "", steward.address); - println!("{:4} Reward distribution:", ""); + display_line!(IO, "{:4}- {}", "", steward.address); + display_line!(IO, "{:4} Reward distribution:", ""); for (address, percentage) in steward.reward_distribution { - println!("{:6}- {} to {}", "", percentage, address); + display_line!( + IO, + "{:6}- {} to {}", + "", + percentage, + address + ); } } } } match fundings.is_empty() { - true => println!("Pgf fundings: no fundings are currently set."), + true => { + display_line!(IO, "Pgf fundings: no fundings are currently set.") + } false => { - println!("Pgf fundings:"); + display_line!(IO, "Pgf fundings:"); for funding in fundings { - println!( + display_line!( + IO, "{:4}- {} for {}", "", funding.detail.target, @@ -1076,94 +1213,116 @@ pub async fn query_pgf( pub async fn query_protocol_parameters< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, _args: args::QueryProtocolParameters, ) { let governance_parameters = query_governance_parameters(client).await; - println!("Governance Parameters\n"); - println!( + display_line!(IO, "Governance Parameters\n"); + display_line!( + IO, "{:4}Min. 
proposal fund: {}", "", governance_parameters.min_proposal_fund.to_string_native() ); - println!( + display_line!( + IO, "{:4}Max. proposal code size: {}", - "", governance_parameters.max_proposal_code_size + "", + governance_parameters.max_proposal_code_size ); - println!( + display_line!( + IO, "{:4}Min. proposal voting period: {}", - "", governance_parameters.min_proposal_voting_period + "", + governance_parameters.min_proposal_voting_period ); - println!( + display_line!( + IO, "{:4}Max. proposal period: {}", - "", governance_parameters.max_proposal_period + "", + governance_parameters.max_proposal_period ); - println!( + display_line!( + IO, "{:4}Max. proposal content size: {}", - "", governance_parameters.max_proposal_content_size + "", + governance_parameters.max_proposal_content_size ); - println!( + display_line!( + IO, "{:4}Min. proposal grace epochs: {}", - "", governance_parameters.min_proposal_grace_epochs + "", + governance_parameters.min_proposal_grace_epochs ); let pgf_parameters = query_pgf_parameters(client).await; - println!("Public Goods Funding Parameters\n"); - println!( + display_line!(IO, "Public Goods Funding Parameters\n"); + display_line!( + IO, "{:4}Pgf inflation rate: {}", - "", pgf_parameters.pgf_inflation_rate + "", + pgf_parameters.pgf_inflation_rate ); - println!( + display_line!( + IO, "{:4}Steward inflation rate: {}", - "", pgf_parameters.stewards_inflation_rate + "", + pgf_parameters.stewards_inflation_rate ); - println!("Protocol parameters"); + display_line!(IO, "Protocol parameters"); let key = param_storage::get_epoch_duration_storage_key(); let epoch_duration = query_storage_value::(client, &key) .await .expect("Parameter should be definied."); - println!( + display_line!( + IO, "{:4}Min. epoch duration: {}", - "", epoch_duration.min_duration + "", + epoch_duration.min_duration ); - println!( + display_line!( + IO, "{:4}Min. 
number of blocks: {}", - "", epoch_duration.min_num_of_blocks + "", + epoch_duration.min_num_of_blocks ); let key = param_storage::get_max_expected_time_per_block_key(); let max_block_duration = query_storage_value::(client, &key) .await .expect("Parameter should be defined."); - println!("{:4}Max. block duration: {}", "", max_block_duration); + display_line!(IO, "{:4}Max. block duration: {}", "", max_block_duration); let key = param_storage::get_tx_whitelist_storage_key(); let vp_whitelist = query_storage_value::>(client, &key) .await .expect("Parameter should be defined."); - println!("{:4}VP whitelist: {:?}", "", vp_whitelist); + display_line!(IO, "{:4}VP whitelist: {:?}", "", vp_whitelist); let key = param_storage::get_tx_whitelist_storage_key(); let tx_whitelist = query_storage_value::>(client, &key) .await .expect("Parameter should be defined."); - println!("{:4}Transactions whitelist: {:?}", "", tx_whitelist); + display_line!(IO, "{:4}Transactions whitelist: {:?}", "", tx_whitelist); let key = param_storage::get_max_block_gas_key(); let max_block_gas = query_storage_value::(client, &key) .await .expect("Parameter should be defined."); - println!("{:4}Max block gas: {:?}", "", max_block_gas); + display_line!(IO, "{:4}Max block gas: {:?}", "", max_block_gas); let key = param_storage::get_fee_unshielding_gas_limit_key(); let fee_unshielding_gas_limit = query_storage_value::(client, &key) .await .expect("Parameter should be defined."); - println!( + display_line!( + IO, "{:4}Fee unshielding gas limit: {:?}", - "", fee_unshielding_gas_limit + "", + fee_unshielding_gas_limit ); let key = param_storage::get_fee_unshielding_descriptions_limit_key(); @@ -1171,9 +1330,11 @@ pub async fn query_protocol_parameters< query_storage_value::(client, &key) .await .expect("Parameter should be defined."); - println!( + display_line!( + IO, "{:4}Fee unshielding descriptions limit: {:?}", - "", fee_unshielding_descriptions_limit + "", + fee_unshielding_descriptions_limit ); let 
key = param_storage::get_gas_cost_key(); @@ -1183,36 +1344,51 @@ pub async fn query_protocol_parameters< >(client, &key) .await .expect("Parameter should be defined."); - println!("{:4}Gas cost table:", ""); + display_line!(IO, "{:4}Gas cost table:", ""); for (token, gas_cost) in gas_cost_table { - println!("{:8}{}: {:?}", "", token, gas_cost); + display_line!(IO, "{:8}{}: {:?}", "", token, gas_cost); } - println!("PoS parameters"); + display_line!(IO, "PoS parameters"); let pos_params = query_pos_parameters(client).await; - println!( + display_line!( + IO, "{:4}Block proposer reward: {}", - "", pos_params.block_proposer_reward + "", + pos_params.block_proposer_reward ); - println!( + display_line!( + IO, "{:4}Block vote reward: {}", - "", pos_params.block_vote_reward + "", + pos_params.block_vote_reward ); - println!( + display_line!( + IO, "{:4}Duplicate vote minimum slash rate: {}", - "", pos_params.duplicate_vote_min_slash_rate + "", + pos_params.duplicate_vote_min_slash_rate ); - println!( + display_line!( + IO, "{:4}Light client attack minimum slash rate: {}", - "", pos_params.light_client_attack_min_slash_rate + "", + pos_params.light_client_attack_min_slash_rate ); - println!( + display_line!( + IO, "{:4}Max. 
validator slots: {}", - "", pos_params.max_validator_slots + "", + pos_params.max_validator_slots + ); + display_line!(IO, "{:4}Pipeline length: {}", "", pos_params.pipeline_len); + display_line!(IO, "{:4}Unbonding length: {}", "", pos_params.unbonding_len); + display_line!( + IO, + "{:4}Votes per token: {}", + "", + pos_params.tm_votes_per_token ); - println!("{:4}Pipeline length: {}", "", pos_params.pipeline_len); - println!("{:4}Unbonding length: {}", "", pos_params.unbonding_len); - println!("{:4}Votes per token: {}", "", pos_params.tm_votes_per_token); } pub async fn query_bond( @@ -1269,6 +1445,7 @@ pub async fn query_pgf_parameters( pub async fn query_and_print_unbonds< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, source: &Address, @@ -1289,16 +1466,18 @@ pub async fn query_and_print_unbonds< } } if total_withdrawable != token::Amount::default() { - println!( + display_line!( + IO, "Total withdrawable now: {}.", total_withdrawable.to_string_native() ); } if !not_yet_withdrawable.is_empty() { - println!("Current epoch: {current_epoch}.") + display_line!(IO, "Current epoch: {current_epoch}."); } for (withdraw_epoch, amount) in not_yet_withdrawable { - println!( + display_line!( + IO, "Amount {} withdrawable starting from epoch {withdraw_epoch}.", amount.to_string_native(), ); @@ -1322,12 +1501,12 @@ pub async fn query_withdrawable_tokens< } /// Query PoS bond(s) and unbond(s) -pub async fn query_bonds( +pub async fn query_bonds( client: &C, _wallet: &mut Wallet, args: args::QueryBonds, ) -> std::io::Result<()> { - let epoch = query_and_print_epoch(client).await; + let epoch = query_and_print_epoch::<_, IO>(client).await; let source = args.owner; let validator = args.validator; @@ -1349,24 +1528,26 @@ pub async fn query_bonds( bond_id.source, bond_id.validator ) }; - writeln!(w, "{}:", bond_type)?; + display_line!(IO, &mut w; "{}:", bond_type)?; for bond in &details.data.bonds { - writeln!( - w, + display_line!( + IO, + &mut w; " 
Remaining active bond from epoch {}: Δ {}", bond.start, bond.amount.to_string_native() )?; } if details.bonds_total != token::Amount::zero() { - writeln!( - w, + display_line!( + IO, + &mut w; "Active (slashed) bonds total: {}", details.bonds_total_active().to_string_native() )?; } - writeln!(w, "Bonds total: {}", details.bonds_total.to_string_native())?; - writeln!(w)?; + display_line!(IO, &mut w; "Bonds total: {}", details.bonds_total.to_string_native())?; + display_line!(IO, &mut w; "")?; if !details.data.unbonds.is_empty() { let bond_type = if bond_id.source == bond_id.validator { @@ -1374,38 +1555,43 @@ pub async fn query_bonds( } else { format!("Unbonded delegations from {}", bond_id.source) }; - writeln!(w, "{}:", bond_type)?; + display_line!(IO, &mut w; "{}:", bond_type)?; for unbond in &details.data.unbonds { - writeln!( - w, + display_line!( + IO, + &mut w; " Withdrawable from epoch {} (active from {}): Δ {}", unbond.withdraw, unbond.start, unbond.amount.to_string_native() )?; } - writeln!( - w, + display_line!( + IO, + &mut w; "Unbonded total: {}", details.unbonds_total.to_string_native() )?; } - writeln!( - w, + display_line!( + IO, + &mut w; "Withdrawable total: {}", details.total_withdrawable.to_string_native() )?; - writeln!(w)?; + display_line!(IO, &mut w; "")?; } if bonds_and_unbonds.bonds_total != bonds_and_unbonds.bonds_total_slashed { - writeln!( - w, + display_line!( + IO, + &mut w; "All bonds total active: {}", bonds_and_unbonds.bonds_total_active().to_string_native() )?; } - writeln!( - w, + display_line!( + IO, + &mut w; "All bonds total: {}", bonds_and_unbonds.bonds_total.to_string_native() )?; @@ -1413,19 +1599,22 @@ pub async fn query_bonds( if bonds_and_unbonds.unbonds_total != bonds_and_unbonds.unbonds_total_slashed { - writeln!( - w, + display_line!( + IO, + &mut w; "All unbonds total active: {}", bonds_and_unbonds.unbonds_total_active().to_string_native() )?; } - writeln!( - w, + display_line!( + IO, + &mut w; "All unbonds total: {}", 
bonds_and_unbonds.unbonds_total.to_string_native() )?; - writeln!( - w, + display_line!( + IO, + &mut w; "All unbonds total withdrawable: {}", bonds_and_unbonds.total_withdrawable.to_string_native() )?; @@ -1433,13 +1622,16 @@ pub async fn query_bonds( } /// Query PoS bonded stake -pub async fn query_bonded_stake( +pub async fn query_bonded_stake< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, args: args::QueryBondedStake, ) { let epoch = match args.epoch { Some(epoch) => epoch, - None => query_and_print_epoch(client).await, + None => query_and_print_epoch::<_, IO>(client).await, }; match args.validator { @@ -1451,13 +1643,14 @@ pub async fn query_bonded_stake( Some(stake) => { // TODO: show if it's in consensus set, below capacity, or // below threshold set - println!( + display_line!( + IO, "Bonded stake of validator {validator}: {}", stake.to_string_native() ) } None => { - println!("No bonded stake found for {validator}") + display_line!(IO, "No bonded stake found for {validator}"); } } } @@ -1481,10 +1674,11 @@ pub async fn query_bonded_stake( let stdout = io::stdout(); let mut w = stdout.lock(); - writeln!(w, "Consensus validators:").unwrap(); + display_line!(IO, &mut w; "Consensus validators:").unwrap(); for val in consensus.into_iter().rev() { - writeln!( - w, + display_line!( + IO, + &mut w; " {}: {}", val.address.encode(), val.bonded_stake.to_string_native() @@ -1492,10 +1686,12 @@ pub async fn query_bonded_stake( .unwrap(); } if !below_capacity.is_empty() { - writeln!(w, "Below capacity validators:").unwrap(); + display_line!(IO, &mut w; "Below capacity validators:") + .unwrap(); for val in below_capacity.into_iter().rev() { - writeln!( - w, + display_line!( + IO, + &mut w; " {}: {}", val.address.encode(), val.bonded_stake.to_string_native() @@ -1507,7 +1703,8 @@ pub async fn query_bonded_stake( } let total_staked_tokens = get_total_staked_tokens(client, epoch).await; - println!( + display_line!( + IO, "Total bonded stake: {}", 
total_staked_tokens.to_string_native() ); @@ -1549,6 +1746,7 @@ pub async fn query_validator_state< /// Query a validator's state information pub async fn query_and_print_validator_state< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, _wallet: &mut Wallet, @@ -1561,22 +1759,32 @@ pub async fn query_and_print_validator_state< match state { Some(state) => match state { ValidatorState::Consensus => { - println!("Validator {validator} is in the consensus set") + display_line!( + IO, + "Validator {validator} is in the consensus set" + ) } ValidatorState::BelowCapacity => { - println!("Validator {validator} is in the below-capacity set") + display_line!( + IO, + "Validator {validator} is in the below-capacity set" + ) } ValidatorState::BelowThreshold => { - println!("Validator {validator} is in the below-threshold set") + display_line!( + IO, + "Validator {validator} is in the below-threshold set" + ) } ValidatorState::Inactive => { - println!("Validator {validator} is inactive") + display_line!(IO, "Validator {validator} is inactive") } ValidatorState::Jailed => { - println!("Validator {validator} is jailed") + display_line!(IO, "Validator {validator} is jailed") } }, - None => println!( + None => display_line!( + IO, "Validator {validator} is either not a validator, or an epoch \ before the current epoch has been queried (and the validator \ state information is no longer stored)" @@ -1587,6 +1795,7 @@ pub async fn query_and_print_validator_state< /// Query PoS validator's commission rate information pub async fn query_and_print_commission_rate< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, _wallet: &mut Wallet, @@ -1601,7 +1810,8 @@ pub async fn query_and_print_commission_rate< commission_rate: rate, max_commission_change_per_epoch: change, }) => { - println!( + display_line!( + IO, "Validator {} commission rate: {}, max change per epoch: {}", validator.encode(), rate, @@ -1609,7 +1819,8 @@ pub async fn 
query_and_print_commission_rate< ); } None => { - println!( + display_line!( + IO, "Address {} is not a validator (did not find commission rate \ and max change)", validator.encode(), @@ -1619,7 +1830,10 @@ pub async fn query_and_print_commission_rate< } /// Query PoS slashes -pub async fn query_slashes( +pub async fn query_slashes< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, _wallet: &mut Wallet, args: args::QuerySlashes, @@ -1632,12 +1846,13 @@ pub async fn query_slashes( RPC.vp().pos().validator_slashes(client, &validator).await, ); if !slashes.is_empty() { - println!("Processed slashes:"); + display_line!(IO, "Processed slashes:"); let stdout = io::stdout(); let mut w = stdout.lock(); for slash in slashes { - writeln!( - w, + display_line!( + IO, + &mut w; "Infraction epoch {}, block height {}, type {}, rate \ {}", slash.epoch, @@ -1648,7 +1863,8 @@ pub async fn query_slashes( .unwrap(); } } else { - println!( + display_line!( + IO, "No processed slashes found for {}", validator.encode() ) @@ -1664,14 +1880,15 @@ pub async fn query_slashes( >(RPC.vp().pos().enqueued_slashes(client).await); let enqueued_slashes = enqueued_slashes.get(&validator).cloned(); if let Some(enqueued) = enqueued_slashes { - println!("\nEnqueued slashes for future processing"); + display_line!(IO, "\nEnqueued slashes for future processing"); for (epoch, slashes) in enqueued { - println!("To be processed in epoch {}", epoch); + display_line!(IO, "To be processed in epoch {}", epoch); for slash in slashes { let stdout = io::stdout(); let mut w = stdout.lock(); - writeln!( - w, + display_line!( + IO, + &mut w; "Infraction epoch {}, block height {}, type {}", slash.epoch, slash.block_height, slash.r#type, ) @@ -1679,7 +1896,11 @@ pub async fn query_slashes( } } } else { - println!("No enqueued slashes found for {}", validator.encode()) + display_line!( + IO, + "No enqueued slashes found for {}", + validator.encode() + ) } } None => { @@ -1691,11 +1912,12 @@ pub 
async fn query_slashes( if !all_slashes.is_empty() { let stdout = io::stdout(); let mut w = stdout.lock(); - println!("Processed slashes:"); + display_line!(IO, "Processed slashes:"); for (validator, slashes) in all_slashes.into_iter() { for slash in slashes { - writeln!( - w, + display_line!( + IO, + &mut w; "Infraction epoch {}, block height {}, rate {}, \ type {}, validator {}", slash.epoch, @@ -1708,7 +1930,7 @@ pub async fn query_slashes( } } } else { - println!("No processed slashes found") + display_line!(IO, "No processed slashes found") } // Find enqueued slashes to be processed in the future for the given @@ -1721,15 +1943,20 @@ pub async fn query_slashes( HashMap>>, >(RPC.vp().pos().enqueued_slashes(client).await); if !enqueued_slashes.is_empty() { - println!("\nEnqueued slashes for future processing"); + display_line!(IO, "\nEnqueued slashes for future processing"); for (validator, slashes_by_epoch) in enqueued_slashes { for (epoch, slashes) in slashes_by_epoch { - println!("\nTo be processed in epoch {}", epoch); + display_line!( + IO, + "\nTo be processed in epoch {}", + epoch + ); for slash in slashes { let stdout = io::stdout(); let mut w = stdout.lock(); - writeln!( - w, + display_line!( + IO, + &mut w; "Infraction epoch {}, block height {}, type \ {}, validator {}", slash.epoch, @@ -1742,13 +1969,19 @@ pub async fn query_slashes( } } } else { - println!("\nNo enqueued slashes found for future processing") + display_line!( + IO, + "\nNo enqueued slashes found for future processing" + ) } } } } -pub async fn query_delegations( +pub async fn query_delegations< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, _wallet: &mut Wallet, args: args::QueryDelegations, @@ -1758,22 +1991,26 @@ pub async fn query_delegations( RPC.vp().pos().delegation_validators(client, &owner).await, ); if delegations.is_empty() { - println!("No delegations found"); + display_line!(IO, "No delegations found"); } else { - println!("Found delegations to:"); 
+ display_line!(IO, "Found delegations to:"); for delegation in delegations { - println!(" {delegation}"); + display_line!(IO, " {delegation}"); } } } -pub async fn query_find_validator( +pub async fn query_find_validator< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, args: args::QueryFindValidator, ) { let args::QueryFindValidator { query: _, tm_addr } = args; if tm_addr.len() != 40 { - eprintln!( + edisplay_line!( + IO, "Expected 40 characters in Tendermint address, got {}", tm_addr.len() ); @@ -1784,15 +2021,20 @@ pub async fn query_find_validator( RPC.vp().pos().validator_by_tm_addr(client, &tm_addr).await, ); match validator { - Some(address) => println!("Found validator address \"{address}\"."), + Some(address) => { + display_line!(IO, "Found validator address \"{address}\".") + } None => { - println!("No validator with Tendermint address {tm_addr} found.") + display_line!( + IO, + "No validator with Tendermint address {tm_addr} found." + ) } } } /// Dry run a transaction -pub async fn dry_run_tx( +pub async fn dry_run_tx( client: &C, tx_bytes: Vec, ) -> Result<(), error::Error> @@ -1800,9 +2042,10 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - println!( + display_line!( + IO, "Dry-run result: {}", - namada::ledger::rpc::dry_run_tx(client, tx_bytes).await? + rpc::dry_run_tx::<_, IO>(client, tx_bytes).await? ); Ok(()) } @@ -1813,7 +2056,7 @@ pub async fn get_public_key( address: &Address, index: u8, ) -> Result, error::Error> { - namada::ledger::rpc::get_public_key_at(client, address, index).await + rpc::get_public_key_at(client, address, index).await } /// Check if the given address is a known validator. 
@@ -1821,7 +2064,7 @@ pub async fn is_validator( client: &C, address: &Address, ) -> bool { - namada::ledger::rpc::is_validator(client, address) + namada::sdk::rpc::is_validator(client, address) .await .unwrap() } @@ -1831,7 +2074,7 @@ pub async fn is_delegator( client: &C, address: &Address, ) -> bool { - namada::ledger::rpc::is_delegator(client, address) + namada::sdk::rpc::is_delegator(client, address) .await .unwrap() } @@ -1841,7 +2084,7 @@ pub async fn is_delegator_at( address: &Address, epoch: Epoch, ) -> bool { - namada::ledger::rpc::is_delegator_at(client, address, epoch) + namada::sdk::rpc::is_delegator_at(client, address, epoch) .await .unwrap() } @@ -1853,13 +2096,16 @@ pub async fn known_address( client: &C, address: &Address, ) -> bool { - namada::ledger::rpc::known_address(client, address) + namada::sdk::rpc::known_address(client, address) .await .unwrap() } /// Query for all conversions. -pub async fn query_conversions( +pub async fn query_conversions< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, wallet: &mut Wallet, args: args::QueryConversions, @@ -1892,7 +2138,8 @@ pub async fn query_conversions( } conversions_found = true; // Print the asset to which the conversion applies - print!( + display!( + IO, "{}[{}]: ", tokens.get(addr).cloned().unwrap_or_else(|| addr.clone()), epoch, @@ -1904,7 +2151,8 @@ pub async fn query_conversions( // printing let ((addr, _), epoch, _, _) = &conv_state.assets[asset_type]; // Now print out this component of the conversion - print!( + display!( + IO, "{}{} {}[{}]", prefix, val, @@ -1915,10 +2163,13 @@ pub async fn query_conversions( prefix = " + "; } // Allowed conversions are always implicit equations - println!(" = 0"); + display_line!(IO, " = 0"); } if !conversions_found { - println!("No conversions found satisfying specified criteria."); + display_line!( + IO, + "No conversions found satisfying specified criteria." 
+ ); } } @@ -1933,15 +2184,18 @@ pub async fn query_conversion( masp_primitives::transaction::components::I32Sum, MerklePath, )> { - namada::ledger::rpc::query_conversion(client, asset_type).await + namada::sdk::rpc::query_conversion(client, asset_type).await } /// Query a wasm code hash -pub async fn query_wasm_code_hash( +pub async fn query_wasm_code_hash< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, code_path: impl AsRef, ) -> Result { - namada::ledger::rpc::query_wasm_code_hash(client, code_path).await + rpc::query_wasm_code_hash::<_, IO>(client, code_path).await } /// Query a storage value and decode it with [`BorshDeserialize`]. @@ -1952,7 +2206,7 @@ pub async fn query_storage_value( where T: BorshDeserialize, { - namada::ledger::rpc::query_storage_value(client, key).await + namada::sdk::rpc::query_storage_value(client, key).await } /// Query a storage value and the proof without decoding. @@ -1964,7 +2218,7 @@ pub async fn query_storage_value_bytes< height: Option, prove: bool, ) -> (Option>, Option) { - namada::ledger::rpc::query_storage_value_bytes(client, key, height, prove) + namada::sdk::rpc::query_storage_value_bytes(client, key, height, prove) .await .unwrap() } @@ -1975,6 +2229,7 @@ pub async fn query_storage_value_bytes< pub async fn query_storage_prefix< C: namada::ledger::queries::Client + Sync, T, + IO: Io, >( client: &C, key: &storage::Key, @@ -1982,7 +2237,7 @@ pub async fn query_storage_prefix< where T: BorshDeserialize, { - namada::ledger::rpc::query_storage_prefix(client, key) + rpc::query_storage_prefix::<_, IO, _>(client, key) .await .unwrap() } @@ -1994,7 +2249,7 @@ pub async fn query_has_storage_key< client: &C, key: &storage::Key, ) -> bool { - namada::ledger::rpc::query_has_storage_key(client, key) + namada::sdk::rpc::query_has_storage_key(client, key) .await .unwrap() } @@ -2003,38 +2258,39 @@ pub async fn query_has_storage_key< /// the current status of a transation. 
pub async fn query_tx_events( client: &C, - tx_event_query: namada::ledger::rpc::TxEventQuery<'_>, + tx_event_query: namada::sdk::rpc::TxEventQuery<'_>, ) -> std::result::Result< Option, ::Error, > { - namada::ledger::rpc::query_tx_events(client, tx_event_query).await + namada::sdk::rpc::query_tx_events(client, tx_event_query).await } /// Lookup the full response accompanying the specified transaction event // TODO: maybe remove this in favor of `query_tx_status` pub async fn query_tx_response( client: &C, - tx_query: namada::ledger::rpc::TxEventQuery<'_>, + tx_query: namada::sdk::rpc::TxEventQuery<'_>, ) -> Result { - namada::ledger::rpc::query_tx_response(client, tx_query).await + namada::sdk::rpc::query_tx_response(client, tx_query).await } /// Lookup the results of applying the specified transaction to the /// blockchain. -pub async fn query_result( +pub async fn query_result( client: &C, args: args::QueryResult, ) { // First try looking up application event pertaining to given hash. let tx_response = query_tx_response( client, - namada::ledger::rpc::TxEventQuery::Applied(&args.tx_hash), + namada::sdk::rpc::TxEventQuery::Applied(&args.tx_hash), ) .await; match tx_response { Ok(result) => { - println!( + display_line!( + IO, "Transaction was applied with result: {}", serde_json::to_string_pretty(&result).unwrap() ) @@ -2043,17 +2299,18 @@ pub async fn query_result( // If this fails then instead look for an acceptance event. 
let tx_response = query_tx_response( client, - namada::ledger::rpc::TxEventQuery::Accepted(&args.tx_hash), + namada::sdk::rpc::TxEventQuery::Accepted(&args.tx_hash), ) .await; match tx_response { - Ok(result) => println!( + Ok(result) => display_line!( + IO, "Transaction was accepted with result: {}", serde_json::to_string_pretty(&result).unwrap() ), Err(err2) => { // Print the errors that caused the lookups to fail - eprintln!("{}\n{}", err1, err2); + edisplay_line!(IO, "{}\n{}", err1, err2); cli::safe_exit(1) } } @@ -2061,16 +2318,16 @@ pub async fn query_result( } } -pub async fn epoch_sleep( +pub async fn epoch_sleep( client: &C, _args: args::Query, ) { - let start_epoch = query_and_print_epoch(client).await; + let start_epoch = query_and_print_epoch::<_, IO>(client).await; loop { tokio::time::sleep(core::time::Duration::from_secs(1)).await; let current_epoch = query_epoch(client).await.unwrap(); if current_epoch > start_epoch { - println!("Reached epoch {}", current_epoch); + display_line!(IO, "Reached epoch {}", current_epoch); break; } } @@ -2096,7 +2353,7 @@ pub async fn get_all_validators( client: &C, epoch: Epoch, ) -> HashSet
{ - namada::ledger::rpc::get_all_validators(client, epoch) + namada::sdk::rpc::get_all_validators(client, epoch) .await .unwrap() } @@ -2107,7 +2364,7 @@ pub async fn get_total_staked_tokens< client: &C, epoch: Epoch, ) -> token::Amount { - namada::ledger::rpc::get_total_staked_tokens(client, epoch) + namada::sdk::rpc::get_total_staked_tokens(client, epoch) .await .unwrap() } @@ -2135,7 +2392,7 @@ pub async fn get_delegators_delegation< client: &C, address: &Address, ) -> HashSet
{ - namada::ledger::rpc::get_delegators_delegation(client, address) + namada::sdk::rpc::get_delegators_delegation(client, address) .await .unwrap() } @@ -2147,7 +2404,7 @@ pub async fn get_delegators_delegation_at< address: &Address, epoch: Epoch, ) -> HashMap { - namada::ledger::rpc::get_delegators_delegation_at(client, address, epoch) + namada::sdk::rpc::get_delegators_delegation_at(client, address, epoch) .await .unwrap() } @@ -2157,7 +2414,7 @@ pub async fn query_governance_parameters< >( client: &C, ) -> GovernanceParameters { - namada::ledger::rpc::query_governance_parameters(client).await + namada::sdk::rpc::query_governance_parameters(client).await } /// A helper to unwrap client's response. Will shut down process on error. @@ -2172,6 +2429,7 @@ fn unwrap_client_response( pub async fn compute_offline_proposal_votes< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, proposal: &OfflineSignedProposal, @@ -2219,7 +2477,8 @@ pub async fn compute_offline_proposal_votes< .insert(validator, delegator_stake); } } else { - println!( + display_line!( + IO, "Skipping vote, not a validator/delegator at epoch {}.", proposal.proposal.tally_epoch ); @@ -2241,7 +2500,7 @@ pub async fn compute_proposal_votes< proposal_id: u64, epoch: Epoch, ) -> ProposalVotes { - let votes = namada::ledger::rpc::query_proposal_votes(client, proposal_id) + let votes = namada::sdk::rpc::query_proposal_votes(client, proposal_id) .await .unwrap(); diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index c3a46d3fd7f..35c17221985 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -13,17 +13,19 @@ use namada::core::ledger::governance::cli::offline::{ use namada::core::ledger::governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, ProposalVote, }; -use namada::ledger::rpc::{TxBroadcastData, TxResponse}; -use namada::ledger::wallet::{Wallet, WalletUtils}; -use namada::ledger::{masp, pos, signing, tx}; +use 
namada::ledger::pos; use namada::proof_of_stake::parameters::PosParams; use namada::proto::Tx; +use namada::sdk::rpc::{TxBroadcastData, TxResponse}; +use namada::sdk::wallet::{Wallet, WalletUtils}; +use namada::sdk::{error, masp, signing, tx}; use namada::tendermint_rpc::HttpClient; use namada::types::address::{Address, ImplicitAddress}; use namada::types::dec::Dec; -use namada::types::error; +use namada::types::io::Io; use namada::types::key::{self, *}; use namada::types::transaction::pos::InitValidator; +use namada::{display_line, edisplay_line}; use super::rpc; use crate::cli::{args, safe_exit, Context}; @@ -38,16 +40,24 @@ use crate::wallet::{ /// Wrapper around `signing::aux_signing_data` that stores the optional /// disposable address to the wallet -pub async fn aux_signing_data( +pub async fn aux_signing_data< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, wallet: &mut Wallet, args: &args::Tx, - owner: &Option
, + owner: Option
, default_signer: Option
, ) -> Result { - let signing_data = - signing::aux_signing_data(client, wallet, args, owner, default_signer) - .await?; + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + wallet, + args, + owner, + default_signer, + ) + .await?; if args.disposable_signing_key { if !(args.dry_run || args.dry_run_wrapper) { @@ -58,7 +68,8 @@ pub async fn aux_signing_data( ) })?; } else { - println!( + display_line!( + IO, "Transaction dry run. The disposable address will not be \ saved to wallet." ) @@ -69,7 +80,10 @@ pub async fn aux_signing_data( } // Build a transaction to reveal the signer of the given transaction. -pub async fn submit_reveal_aux( +pub async fn submit_reveal_aux< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, ctx: &mut Context, args: args::Tx, @@ -87,11 +101,16 @@ pub async fn submit_reveal_aux( let public_key = key.ref_to(); if tx::is_reveal_pk_needed::(client, address, args.force).await? { - let signing_data = - aux_signing_data(client, &mut ctx.wallet, &args, &None, None) - .await?; + let signing_data = aux_signing_data::<_, IO>( + client, + &mut ctx.wallet, + &args, + None, + None, + ) + .await?; - let (mut tx, _epoch) = tx::build_reveal_pk( + let (mut tx, _epoch) = tx::build_reveal_pk::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -102,38 +121,46 @@ pub async fn submit_reveal_aux( ) .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>( + client, + &mut ctx.wallet, + &tx, + ) + .await?; signing::sign_tx(&mut ctx.wallet, &args, &mut tx, signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args, tx) + .await?; } } Ok(()) } -pub async fn submit_custom( +pub async fn submit_custom( client: &C, ctx: &mut Context, args: args::TxCustom, ) -> Result<(), error::Error> where + C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { let 
default_signer = Some(args.owner.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(args.owner.clone()), + Some(args.owner.clone()), default_signer, ) .await?; - submit_reveal_aux(client, ctx, args.tx.clone(), &args.owner).await?; + submit_reveal_aux::<_, IO>(client, ctx, args.tx.clone(), &args.owner) + .await?; - let (mut tx, _epoch) = tx::build_custom( + let (mut tx, _epoch) = tx::build_custom::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -142,19 +169,21 @@ where ) .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; } Ok(()) } -pub async fn submit_update_account( +pub async fn submit_update_account( client: &C, ctx: &mut Context, args: args::TxUpdateAccount, @@ -164,16 +193,16 @@ where C::Error: std::fmt::Display, { let default_signer = Some(args.addr.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(args.addr.clone()), + Some(args.addr.clone()), default_signer, ) .await?; - let (mut tx, _epoch) = tx::build_update_account( + let (mut tx, _epoch) = tx::build_update_account::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -182,31 +211,39 @@ where ) .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, 
signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; } Ok(()) } -pub async fn submit_init_account( +pub async fn submit_init_account( client: &C, ctx: &mut Context, args: args::TxInitAccount, ) -> Result<(), error::Error> where + C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let signing_data = - aux_signing_data(client, &mut ctx.wallet, &args.tx, &None, None) - .await?; + let signing_data = aux_signing_data::<_, IO>( + client, + &mut ctx.wallet, + &args.tx, + None, + None, + ) + .await?; - let (mut tx, _epoch) = tx::build_init_account( + let (mut tx, _epoch) = tx::build_init_account::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -215,21 +252,21 @@ where ) .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; } Ok(()) } -pub async fn submit_init_validator< - C: namada::ledger::queries::Client + Sync, ->( +pub async fn submit_init_validator( client: &C, mut ctx: Context, args::TxInitValidator { @@ -247,7 +284,10 @@ pub async fn submit_init_validator< unsafe_dont_encrypt, tx_code_path: _, }: args::TxInitValidator, -) -> Result<(), error::Error> { +) -> Result<(), error::Error> +where + C: namada::ledger::queries::Client + Sync, +{ let tx_args = args::Tx { chain_id: tx_args .clone() @@ -281,12 +321,12 @@ pub async fn submit_init_validator< .map(|key| match key { common::SecretKey::Ed25519(_) => key, common::SecretKey::Secp256k1(_) => { - eprintln!("Consensus key can only be ed25519"); + edisplay_line!(IO, "Consensus 
key can only be ed25519"); safe_exit(1) } }) .unwrap_or_else(|| { - println!("Generating consensus key..."); + display_line!(IO, "Generating consensus key..."); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); ctx.wallet @@ -307,12 +347,12 @@ pub async fn submit_init_validator< .map(|key| match key { common::SecretKey::Secp256k1(_) => key.ref_to(), common::SecretKey::Ed25519(_) => { - eprintln!("Eth cold key can only be secp256k1"); + edisplay_line!(IO, "Eth cold key can only be secp256k1"); safe_exit(1) } }) .unwrap_or_else(|| { - println!("Generating Eth cold key..."); + display_line!(IO, "Generating Eth cold key..."); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); ctx.wallet @@ -334,12 +374,12 @@ pub async fn submit_init_validator< .map(|key| match key { common::SecretKey::Secp256k1(_) => key.ref_to(), common::SecretKey::Ed25519(_) => { - eprintln!("Eth hot key can only be secp256k1"); + edisplay_line!(IO, "Eth hot key can only be secp256k1"); safe_exit(1) } }) .unwrap_or_else(|| { - println!("Generating Eth hot key..."); + display_line!(IO, "Generating Eth hot key..."); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); ctx.wallet @@ -358,7 +398,7 @@ pub async fn submit_init_validator< }); if protocol_key.is_none() { - println!("Generating protocol signing key..."); + display_line!(IO, "Generating protocol signing key..."); } // Generate the validator keys let validator_keys = gen_validator_keys( @@ -375,14 +415,17 @@ pub async fn submit_init_validator< .expect("DKG sessions keys should have been created") .public(); - let validator_vp_code_hash = - query_wasm_code_hash(client, validator_vp_code_path.to_str().unwrap()) - .await - .unwrap(); + let validator_vp_code_hash = query_wasm_code_hash::( + client, + validator_vp_code_path.to_str().unwrap(), + ) + .await + .unwrap(); // Validate the commission rate data if commission_rate > Dec::one() || commission_rate < Dec::zero() { - eprintln!( 
+ edisplay_line!( + IO, "The validator commission rate must not exceed 1.0 or 100%, and \ it must be 0 or positive" ); @@ -393,7 +436,8 @@ pub async fn submit_init_validator< if max_commission_rate_change > Dec::one() || max_commission_rate_change < Dec::zero() { - eprintln!( + edisplay_line!( + IO, "The validator maximum change in commission rate per epoch must \ not exceed 1.0 or 100%" ); @@ -402,7 +446,7 @@ pub async fn submit_init_validator< } } let tx_code_hash = - query_wasm_code_hash(client, args::TX_INIT_VALIDATOR_WASM) + query_wasm_code_hash::<_, IO>(client, args::TX_INIT_VALIDATOR_WASM) .await .unwrap(); @@ -428,11 +472,16 @@ pub async fn submit_init_validator< tx.add_code_from_hash(tx_code_hash).add_data(data); - let signing_data = - aux_signing_data(client, &mut ctx.wallet, &tx_args, &None, None) - .await?; + let signing_data = signing::aux_signing_data::<_, _, IO>( + client, + &mut ctx.wallet, + &tx_args, + None, + None, + ) + .await?; - tx::prepare_tx( + tx::prepare_tx::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -440,21 +489,21 @@ pub async fn submit_init_validator< &mut tx, signing_data.fee_payer.clone(), None, - #[cfg(not(feature = "mainnet"))] - false, ) .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if tx_args.dump_tx { - tx::dump_tx(&tx_args, tx); + tx::dump_tx::(&tx_args, tx); } else { signing::sign_tx(&mut ctx.wallet, &tx_args, &mut tx, signing_data)?; - let result = tx::process_tx(client, &mut ctx.wallet, &tx_args, tx) - .await? - .initialized_accounts(); + let result = + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &tx_args, tx) + .await? + .initialized_accounts(); if !tx_args.dry_run { let (validator_address_alias, validator_address) = match &result[..] 
@@ -466,12 +515,15 @@ pub async fn submit_init_validator< { (alias.clone(), validator_address.clone()) } else { - eprintln!("Expected one account to be created"); + edisplay_line!( + IO, + "Expected one account to be created" + ); safe_exit(1) } } _ => { - eprintln!("Expected one account to be created"); + edisplay_line!(IO, "Expected one account to be created"); safe_exit(1) } }; @@ -479,7 +531,7 @@ pub async fn submit_init_validator< ctx.wallet .add_validator_data(validator_address, validator_keys); crate::wallet::save(&ctx.wallet) - .unwrap_or_else(|err| eprintln!("{}", err)); + .unwrap_or_else(|err| edisplay_line!(IO, "{}", err)); let tendermint_home = ctx.config.ledger.cometbft_dir(); tendermint_node::write_validator_key( @@ -505,24 +557,38 @@ pub async fn submit_init_validator< .await .expect("Pos parameter should be defined."); - println!(); - println!( + display_line!(IO, ""); + display_line!( + IO, "The validator's addresses and keys were stored in the wallet:" ); - println!(" Validator address \"{}\"", validator_address_alias); - println!(" Validator account key \"{}\"", validator_key_alias); - println!(" Consensus key \"{}\"", consensus_key_alias); - println!( + display_line!( + IO, + " Validator address \"{}\"", + validator_address_alias + ); + display_line!( + IO, + " Validator account key \"{}\"", + validator_key_alias + ); + display_line!(IO, " Consensus key \"{}\"", consensus_key_alias); + display_line!( + IO, "The ledger node has been setup to use this validator's \ address and consensus key." ); - println!( + display_line!( + IO, "Your validator will be active in {} epochs. Be sure to \ restart your node for the changes to take effect!", pos_params.pipeline_len ); } else { - println!("Transaction dry run. No addresses have been saved."); + display_line!( + IO, + "Transaction dry run. No addresses have been saved." 
+ ); } } Ok(()) @@ -541,7 +607,7 @@ pub struct CLIShieldedUtils { impl CLIShieldedUtils { /// Initialize a shielded transaction context that identifies notes /// decryptable by any viewing key in the given set - pub fn new(context_dir: PathBuf) -> masp::ShieldedContext { + pub fn new(context_dir: PathBuf) -> masp::ShieldedContext { // Make sure that MASP parameters are downloaded to enable MASP // transaction building and verification later on let params_dir = masp::get_params_dir(); @@ -552,10 +618,13 @@ impl CLIShieldedUtils { && convert_path.exists() && output_path.exists()) { - println!("MASP parameters not present, downloading..."); + display_line!(IO, "MASP parameters not present, downloading..."); masp_proofs::download_masp_parameters(None) .expect("MASP parameters not present or downloadable"); - println!("MASP parameter download complete, resuming execution..."); + display_line!( + IO, + "MASP parameter download complete, resuming execution..." + ); } // Finally initialize a shielded context with the supplied directory let utils = Self { context_dir }; @@ -636,23 +705,26 @@ impl masp::ShieldedUtils for CLIShieldedUtils { } } -pub async fn submit_transfer( +pub async fn submit_transfer< + C: namada::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, mut ctx: Context, args: args::TxTransfer, ) -> Result<(), error::Error> { for _ in 0..2 { let default_signer = Some(args.source.effective_address()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(args.source.effective_address()), + Some(args.source.effective_address()), default_signer, ) .await?; - submit_reveal_aux( + submit_reveal_aux::<_, IO>( client, &mut ctx, args.tx.clone(), @@ -661,7 +733,7 @@ pub async fn submit_transfer( .await?; let arg = args.clone(); - let (mut tx, tx_epoch) = tx::build_transfer( + let (mut tx, tx_epoch) = tx::build_transfer::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -669,17 
+741,24 @@ pub async fn submit_transfer( signing_data.fee_payer.clone(), ) .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); break; } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - let result = - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + let result = tx::process_tx::<_, _, IO>( + client, + &mut ctx.wallet, + &args.tx, + tx, + ) + .await?; - let submission_epoch = rpc::query_and_print_epoch(client).await; + let submission_epoch = + rpc::query_and_print_epoch::<_, IO>(client).await; match result { ProcessTxResponse::Applied(resp) if @@ -691,7 +770,7 @@ pub async fn submit_transfer( tx_epoch.unwrap() != submission_epoch => { // Then we probably straddled an epoch boundary. Let's retry... - eprintln!( + edisplay_line!(IO, "MASP transaction rejected and this may be due to the \ epoch changing. 
Attempting to resubmit transaction.", ); @@ -707,27 +786,29 @@ pub async fn submit_transfer( Ok(()) } -pub async fn submit_ibc_transfer( +pub async fn submit_ibc_transfer( client: &C, mut ctx: Context, args: args::TxIbcTransfer, ) -> Result<(), error::Error> where + C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { let default_signer = Some(args.source.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(args.source.clone()), + Some(args.source.clone()), default_signer, ) .await?; - submit_reveal_aux(client, &mut ctx, args.tx.clone(), &args.source).await?; + submit_reveal_aux::<_, IO>(client, &mut ctx, args.tx.clone(), &args.source) + .await?; - let (mut tx, _epoch) = tx::build_ibc_transfer( + let (mut tx, _epoch) = tx::build_ibc_transfer::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -735,27 +816,30 @@ where signing_data.fee_payer.clone(), ) .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; } Ok(()) } -pub async fn submit_init_proposal( +pub async fn submit_init_proposal( client: &C, mut ctx: Context, args: args::InitProposal, ) -> Result<(), error::Error> where + C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let current_epoch = rpc::query_and_print_epoch(client).await; + let current_epoch = rpc::query_and_print_epoch::<_, IO>(client).await; let governance_parameters = rpc::query_governance_parameters(client).await; let ((mut tx_builder, _fee_unshield_epoch), signing_data) = if args @@ -771,11 +855,11 
@@ where .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; let default_signer = Some(proposal.author.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(proposal.author.clone()), + Some(proposal.author.clone()), default_signer, ) .await?; @@ -792,7 +876,7 @@ where ) })?; - println!("Proposal serialized to: {}", output_file_path); + display_line!(IO, "Proposal serialized to: {}", output_file_path); return Ok(()); } else if args.is_pgf_funding { let proposal = @@ -806,16 +890,16 @@ where .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; let default_signer = Some(proposal.proposal.author.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(proposal.proposal.author.clone()), + Some(proposal.proposal.author.clone()), default_signer, ) .await?; - submit_reveal_aux( + submit_reveal_aux::<_, IO>( client, &mut ctx, args.tx.clone(), @@ -824,7 +908,7 @@ where .await?; ( - tx::build_pgf_funding_proposal( + tx::build_pgf_funding_proposal::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -858,16 +942,16 @@ where .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; let default_signer = Some(proposal.proposal.author.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(proposal.proposal.author.clone()), + Some(proposal.proposal.author.clone()), default_signer, ) .await?; - submit_reveal_aux( + submit_reveal_aux::<_, IO>( client, &mut ctx, args.tx.clone(), @@ -876,7 +960,7 @@ where .await?; ( - tx::build_pgf_stewards_proposal( + tx::build_pgf_stewards_proposal::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -908,16 +992,16 @@ where .map_err(|e| error::TxError::InvalidProposal(e.to_string()))?; let default_signer = Some(proposal.proposal.author.clone()); - let 
signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(proposal.proposal.author.clone()), + Some(proposal.proposal.author.clone()), default_signer, ) .await?; - submit_reveal_aux( + submit_reveal_aux::<_, IO>( client, &mut ctx, args.tx.clone(), @@ -926,7 +1010,7 @@ where .await?; ( - tx::build_default_proposal( + tx::build_default_proposal::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -938,10 +1022,15 @@ where signing_data, ) }; - signing::generate_test_vector(client, &mut ctx.wallet, &tx_builder).await?; + signing::generate_test_vector::<_, _, IO>( + client, + &mut ctx.wallet, + &tx_builder, + ) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx_builder); + tx::dump_tx::(&args.tx, tx_builder); } else { signing::sign_tx( &mut ctx.wallet, @@ -949,13 +1038,19 @@ where &mut tx_builder, signing_data, )?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx_builder).await?; + tx::process_tx::<_, _, IO>( + client, + &mut ctx.wallet, + &args.tx, + tx_builder, + ) + .await?; } Ok(()) } -pub async fn submit_vote_proposal( +pub async fn submit_vote_proposal( client: &C, mut ctx: Context, args: args::VoteProposal, @@ -964,14 +1059,14 @@ where C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - let current_epoch = rpc::query_and_print_epoch(client).await; + let current_epoch = rpc::query_and_print_epoch::<_, IO>(client).await; let default_signer = Some(args.voter.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(args.voter.clone()), + Some(args.voter.clone()), default_signer.clone(), ) .await?; @@ -1015,10 +1110,10 @@ where .serialize(args.tx.output_folder) .expect("Should be able to serialize the offline proposal"); - println!("Proposal vote serialized to: {}", output_file_path); + display_line!(IO, "Proposal vote serialized to: {}", output_file_path); return Ok(()); } 
else { - tx::build_vote_proposal( + tx::build_vote_proposal::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -1028,10 +1123,15 @@ where ) .await? }; - signing::generate_test_vector(client, &mut ctx.wallet, &tx_builder).await?; + signing::generate_test_vector::<_, _, IO>( + client, + &mut ctx.wallet, + &tx_builder, + ) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx_builder); + tx::dump_tx::(&args.tx, tx_builder); } else { signing::sign_tx( &mut ctx.wallet, @@ -1039,13 +1139,19 @@ where &mut tx_builder, signing_data, )?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx_builder).await?; + tx::process_tx::<_, _, IO>( + client, + &mut ctx.wallet, + &args.tx, + tx_builder, + ) + .await?; } Ok(()) } -pub async fn sign_tx( +pub async fn sign_tx( client: &C, ctx: &mut Context, args::SignTx { @@ -1061,16 +1167,16 @@ where let tx = if let Ok(transaction) = Tx::deserialize(tx_data.as_ref()) { transaction } else { - eprintln!("Couldn't decode the transaction."); + edisplay_line!(IO, "Couldn't decode the transaction."); safe_exit(1) }; let default_signer = Some(owner.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &tx_args, - &Some(owner), + Some(owner.clone()), default_signer, ) .await?; @@ -1084,7 +1190,8 @@ where { Some(secret_key) } else { - eprintln!( + edisplay_line!( + IO, "Couldn't find the secret key for {}. 
Skipping signature \ generation.", public_key @@ -1096,14 +1203,17 @@ where if let Some(account_public_keys_map) = signing_data.account_public_keys_map { - let signatures = - tx.compute_section_signature(secret_keys, &account_public_keys_map); + let signatures = tx.compute_section_signature( + secret_keys, + &account_public_keys_map, + Some(owner), + ); for signature in &signatures { let filename = format!( "offline_signature_{}_{}.tx", tx.header_hash(), - signature.index + signature.pubkey, ); let output_path = match &tx_args.output_folder { Some(path) => path.join(filename), @@ -1118,11 +1228,10 @@ where &signature.serialize(), ) .expect("Signature should be deserializable."); - println!( + display_line!( + IO, "Signature for {} serialized at {}", - &account_public_keys_map - .get_public_key_from_index(signature.index) - .unwrap(), + signature.pubkey, output_path.display() ); } @@ -1130,20 +1239,27 @@ where Ok(()) } -pub async fn submit_reveal_pk( +pub async fn submit_reveal_pk( client: &C, ctx: &mut Context, args: args::RevealPk, ) -> Result<(), error::Error> where + C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { - submit_reveal_aux(client, ctx, args.tx, &(&args.public_key).into()).await?; + submit_reveal_aux::<_, IO>( + client, + ctx, + args.tx, + &(&args.public_key).into(), + ) + .await?; Ok(()) } -pub async fn submit_bond( +pub async fn submit_bond( client: &C, ctx: &mut Context, args: args::Bond, @@ -1154,18 +1270,19 @@ where { let default_address = args.source.clone().unwrap_or(args.validator.clone()); let default_signer = Some(default_address.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(default_address.clone()), + Some(default_address.clone()), default_signer, ) .await?; - submit_reveal_aux(client, ctx, args.tx.clone(), &default_address).await?; + submit_reveal_aux::<_, IO>(client, ctx, args.tx.clone(), &default_address) + .await?; - 
let (mut tx, _fee_unshield_epoch) = tx::build_bond( + let (mut tx, _fee_unshield_epoch) = tx::build_bond::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -1173,40 +1290,43 @@ where signing_data.fee_payer.clone(), ) .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; } Ok(()) } -pub async fn submit_unbond( +pub async fn submit_unbond( client: &C, ctx: &mut Context, args: args::Unbond, ) -> Result<(), error::Error> where + C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { let default_address = args.source.clone().unwrap_or(args.validator.clone()); let default_signer = Some(default_address.clone()); - let signing_data = aux_signing_data( + let signing_data = signing::aux_signing_data::<_, _, IO>( client, &mut ctx.wallet, &args.tx, - &Some(default_address), + Some(default_address), default_signer, ) .await?; let (mut tx, _fee_unshield_epoch, latest_withdrawal_pre) = - tx::build_unbond( + tx::build_unbond::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -1214,41 +1334,45 @@ where signing_data.fee_payer.clone(), ) .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; - tx::query_unbonds(client, args.clone(), 
latest_withdrawal_pre).await?; + tx::query_unbonds::<_, IO>(client, args.clone(), latest_withdrawal_pre) + .await?; } Ok(()) } -pub async fn submit_withdraw( +pub async fn submit_withdraw( client: &C, mut ctx: Context, args: args::Withdraw, ) -> Result<(), error::Error> where + C: namada::ledger::queries::Client + Sync, C::Error: std::fmt::Display, { let default_address = args.source.clone().unwrap_or(args.validator.clone()); let default_signer = Some(default_address.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(default_address), + Some(default_address), default_signer, ) .await?; - let (mut tx, _fee_unshield_epoch) = tx::build_withdraw( + let (mut tx, _fee_unshield_epoch) = tx::build_withdraw::<_, _, _, IO>( client, &mut ctx.wallet, &mut ctx.shielded, @@ -1256,55 +1380,58 @@ where signing_data.fee_payer.clone(), ) .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; } Ok(()) } -pub async fn submit_validator_commission_change< - C: namada::ledger::queries::Client + Sync, ->( +pub async fn submit_validator_commission_change( client: &C, mut ctx: Context, args: args::CommissionRateChange, ) -> Result<(), error::Error> where - C::Error: std::fmt::Display, + C: namada::ledger::queries::Client + Sync, { let default_signer = Some(args.validator.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(args.validator.clone()), + Some(args.validator.clone()), default_signer, ) .await?; - let 
(mut tx, _fee_unshield_epoch) = tx::build_validator_commission_change( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + let (mut tx, _fee_unshield_epoch) = + tx::build_validator_commission_change::<_, _, _, IO>( + client, + &mut ctx.wallet, + &mut ctx.shielded, + args.clone(), + signing_data.fee_payer.clone(), + ) + .await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; } Ok(()) @@ -1312,6 +1439,7 @@ where pub async fn submit_unjail_validator< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, mut ctx: Context, @@ -1321,31 +1449,34 @@ where C::Error: std::fmt::Display, { let default_signer = Some(args.validator.clone()); - let signing_data = aux_signing_data( + let signing_data = aux_signing_data::<_, IO>( client, &mut ctx.wallet, &args.tx, - &Some(args.validator.clone()), + Some(args.validator.clone()), default_signer, ) .await?; - let (mut tx, _fee_unshield_epoch) = tx::build_unjail_validator( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - signing_data.fee_payer.clone(), - ) - .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + let (mut tx, _fee_unshield_epoch) = + tx::build_unjail_validator::<_, _, _, IO>( + client, + &mut ctx.wallet, + &mut ctx.shielded, + args.clone(), + signing_data.fee_payer.clone(), + ) + .await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, 
&args.tx, &mut tx, signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; } Ok(()) @@ -1353,6 +1484,7 @@ where pub async fn submit_update_steward_commission< C: namada::ledger::queries::Client + Sync, + IO: Io, >( client: &C, mut ctx: Context, @@ -1363,37 +1495,40 @@ where C::Error: std::fmt::Display, { let default_signer = Some(args.steward.clone()); - let signing_data = signing::aux_signing_data( + let signing_data = signing::aux_signing_data::<_, _, IO>( client, &mut ctx.wallet, &args.tx, - &Some(args.steward.clone()), + Some(args.steward.clone()), default_signer, ) .await?; - let (mut tx, _fee_unshield_epoch) = tx::build_update_steward_commission( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - &signing_data.fee_payer, - ) - .await?; + let (mut tx, _fee_unshield_epoch) = + tx::build_update_steward_commission::<_, _, _, IO>( + client, + &mut ctx.wallet, + &mut ctx.shielded, + args.clone(), + &signing_data.fee_payer, + ) + .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; } Ok(()) } -pub async fn submit_resign_steward( +pub async fn submit_resign_steward( client: &C, mut ctx: Context, args: args::ResignSteward, @@ -1403,54 +1538,58 @@ where C::Error: std::fmt::Display, { let default_signer = Some(args.steward.clone()); - let signing_data = signing::aux_signing_data( + let signing_data = signing::aux_signing_data::<_, _, IO>( client, &mut ctx.wallet, &args.tx, - &Some(args.steward.clone()), + Some(args.steward.clone()), 
default_signer, ) .await?; - let (mut tx, _fee_unshield_epoch) = tx::build_resign_steward( - client, - &mut ctx.wallet, - &mut ctx.shielded, - args.clone(), - &signing_data.fee_payer, - ) - .await?; + let (mut tx, _fee_unshield_epoch) = + tx::build_resign_steward::<_, _, _, IO>( + client, + &mut ctx.wallet, + &mut ctx.shielded, + args.clone(), + &signing_data.fee_payer, + ) + .await?; - signing::generate_test_vector(client, &mut ctx.wallet, &tx).await?; + signing::generate_test_vector::<_, _, IO>(client, &mut ctx.wallet, &tx) + .await?; if args.tx.dump_tx { - tx::dump_tx(&args.tx, tx); + tx::dump_tx::(&args.tx, tx); } else { signing::sign_tx(&mut ctx.wallet, &args.tx, &mut tx, signing_data)?; - tx::process_tx(client, &mut ctx.wallet, &args.tx, tx).await?; + tx::process_tx::<_, _, IO>(client, &mut ctx.wallet, &args.tx, tx) + .await?; } Ok(()) } /// Save accounts initialized from a tx into the wallet, if any. -pub async fn save_initialized_accounts( +pub async fn save_initialized_accounts( wallet: &mut Wallet, args: &args::Tx, initialized_accounts: Vec
, ) { - tx::save_initialized_accounts::(wallet, args, initialized_accounts).await + tx::save_initialized_accounts::(wallet, args, initialized_accounts) + .await } /// Broadcast a transaction to be included in the blockchain and checks that /// the tx has been successfully included into the mempool of a validator /// /// In the case of errors in any of those stages, an error message is returned -pub async fn broadcast_tx( +pub async fn broadcast_tx( rpc_cli: &HttpClient, to_broadcast: &TxBroadcastData, ) -> Result { - tx::broadcast_tx(rpc_cli, to_broadcast).await + tx::broadcast_tx::<_, IO>(rpc_cli, to_broadcast).await } /// Broadcast a transaction to be included in the blockchain. @@ -1461,9 +1600,9 @@ pub async fn broadcast_tx( /// 3. The decrypted payload of the tx has been included on the blockchain. /// /// In the case of errors in any of those stages, an error message is returned -pub async fn submit_tx( +pub async fn submit_tx( client: &HttpClient, to_broadcast: TxBroadcastData, ) -> Result { - tx::submit_tx(client, to_broadcast).await + tx::submit_tx::<_, IO>(client, to_broadcast).await } diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs index 77b18f9136f..2bf3f23ab05 100644 --- a/apps/src/lib/client/utils.rs +++ b/apps/src/lib/client/utils.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use std::env; use std::fs::{self, File, OpenOptions}; use std::io::Write; -use std::net::SocketAddr; use std::path::{Path, PathBuf}; use std::str::FromStr; @@ -10,7 +9,7 @@ use borsh::BorshSerialize; use flate2::read::GzDecoder; use flate2::write::GzEncoder; use flate2::Compression; -use namada::ledger::wallet::Wallet; +use namada::sdk::wallet::Wallet; use namada::types::address; use namada::types::chain::ChainId; use namada::types::dec::Dec; @@ -799,8 +798,8 @@ pub fn init_network( config.ledger.cometbft.p2p.addr_book_strict = !localhost; // Clear the net address from the config and use it to set ports let net_address = 
validator_config.net_address.take().unwrap(); - let _ip = SocketAddr::from_str(&net_address).unwrap().ip(); - let first_port = SocketAddr::from_str(&net_address).unwrap().port(); + let split: Vec<&str> = net_address.split(':').collect(); + let first_port = split[1].parse::().unwrap(); if localhost { config.ledger.cometbft.p2p.laddr = TendermintAddress::from_str( &format!("127.0.0.1:{}", first_port), @@ -1050,7 +1049,7 @@ pub fn init_genesis_validator( tendermint_node_key: Some(HexString( pre_genesis.tendermint_node_key.ref_to().to_string(), )), - net_address: Some(net_address.to_string()), + net_address: Some(net_address), ..Default::default() }, )]), diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index 0e5fced67ee..ac3738a9bc8 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -6,8 +6,6 @@ use borsh::{BorshDeserialize, BorshSerialize}; use derivative::Derivative; use namada::core::ledger::governance::parameters::GovernanceParameters; use namada::core::ledger::pgf::parameters::PgfParameters; -#[cfg(not(feature = "mainnet"))] -use namada::core::ledger::testnet_pow; use namada::ledger::eth_bridge::EthereumBridgeConfig; use namada::ledger::parameters::EpochDuration; use namada::ledger::pos::{Dec, GenesisValidator, PosParams}; @@ -32,8 +30,6 @@ pub mod genesis_config { use eyre::Context; use namada::core::ledger::governance::parameters::GovernanceParameters; use namada::core::ledger::pgf::parameters::PgfParameters; - #[cfg(not(feature = "mainnet"))] - use namada::core::ledger::testnet_pow; use namada::ledger::parameters::EpochDuration; use namada::ledger::pos::{Dec, GenesisValidator, PosParams}; use namada::types::address::Address; @@ -42,7 +38,6 @@ pub mod genesis_config { use namada::types::key::*; use namada::types::time::Rfc3339String; use namada::types::token::Denomination; - use namada::types::uint::Uint; use namada::types::{storage, token}; use serde::{Deserialize, Serialize}; use thiserror::Error; 
@@ -116,13 +111,6 @@ pub mod genesis_config { // Name of the native token - this must one of the tokens included in // the `token` field pub native_token: String, - #[cfg(not(feature = "mainnet"))] - /// Testnet faucet PoW difficulty - defaults to `0` when not set - pub faucet_pow_difficulty: Option, - #[cfg(not(feature = "mainnet"))] - /// Testnet faucet withdrawal limit - defaults to 1000 tokens when not - /// set - pub faucet_withdrawal_limit: Option, // Initial validator set pub validator: HashMap, // Token accounts present at genesis @@ -535,10 +523,6 @@ pub mod genesis_config { let GenesisConfig { genesis_time, native_token, - #[cfg(not(feature = "mainnet"))] - faucet_pow_difficulty, - #[cfg(not(feature = "mainnet"))] - faucet_withdrawal_limit, validator, token, established, @@ -699,10 +683,6 @@ pub mod genesis_config { let mut genesis = Genesis { genesis_time: genesis_time.try_into().unwrap(), native_token, - #[cfg(not(feature = "mainnet"))] - faucet_pow_difficulty, - #[cfg(not(feature = "mainnet"))] - faucet_withdrawal_limit, validators: validators.into_values().collect(), token_accounts, established_accounts: established_accounts.into_values().collect(), @@ -753,10 +733,6 @@ pub mod genesis_config { pub struct Genesis { pub genesis_time: DateTimeUtc, pub native_token: Address, - #[cfg(not(feature = "mainnet"))] - pub faucet_pow_difficulty: Option, - #[cfg(not(feature = "mainnet"))] - pub faucet_withdrawal_limit: Option, pub validators: Vec, pub token_accounts: Vec, pub established_accounts: Vec, @@ -1150,17 +1126,9 @@ pub fn genesis(num_validators: u64) -> Genesis { address: EthAddress([0; 20]), version: Default::default(), }, - governance: UpgradeableContract { - address: EthAddress([1; 20]), - version: Default::default(), - }, }, }), native_token: address::nam(), - #[cfg(not(feature = "mainnet"))] - faucet_pow_difficulty: None, - #[cfg(not(feature = "mainnet"))] - faucet_withdrawal_limit: None, } } diff --git a/apps/src/lib/config/utils.rs 
b/apps/src/lib/config/utils.rs index ca038dcb422..03725d112c8 100644 --- a/apps/src/lib/config/utils.rs +++ b/apps/src/lib/config/utils.rs @@ -47,8 +47,7 @@ fn num_of_threads_aux( } } -// fixme: Handle this gracefully with either an Option or a Result. Ensure that -// hostname resolution works. +// FIXME: Handle this gracefully with either an Option or a Result. pub fn convert_tm_addr_to_socket_addr( tm_addr: &TendermintAddress, ) -> SocketAddr { diff --git a/apps/src/lib/node/ledger/ethereum_oracle/events.rs b/apps/src/lib/node/ledger/ethereum_oracle/events.rs index bd11cf09e8e..4baae5e19f5 100644 --- a/apps/src/lib/node/ledger/ethereum_oracle/events.rs +++ b/apps/src/lib/node/ledger/ethereum_oracle/events.rs @@ -3,15 +3,11 @@ pub mod eth_events { use std::str::FromStr; use ethbridge_bridge_events::{ - BridgeEvents, TransferToErcFilter, TransferToNamadaFilter, - }; - use ethbridge_events::{DynEventCodec, Events as RawEvents}; - use ethbridge_governance_events::{ - GovernanceEvents, NewContractFilter, UpgradedContractFilter, + BridgeEvents, TransferToChainFilter, TransferToErcFilter, ValidatorSetUpdateFilter, }; + use ethbridge_events::{DynEventCodec, Events as RawEvents}; use namada::core::types::ethereum_structs; - use namada::eth_bridge::ethers::contract::EthEvent; use namada::types::address::Address; use namada::types::ethereum_events::{ EthAddress, EthereumEvent, TransferToEthereum, TransferToNamada, Uint, @@ -70,20 +66,17 @@ pub mod eth_events { TransferToErcFilter { nonce, transfers, - valid_map, relayer_address, }, )) => EthereumEvent::TransfersToEthereum { nonce: nonce.parse_uint256()?, transfers: transfers.parse_transfer_to_eth_array()?, - valid_transfers_map: valid_map, relayer: relayer_address.parse_address()?, }, - RawEvents::Bridge(BridgeEvents::TransferToNamadaFilter( - TransferToNamadaFilter { + RawEvents::Bridge(BridgeEvents::TransferToChainFilter( + TransferToChainFilter { nonce, transfers, - valid_map, confirmations: requested_confirmations, }, 
)) => { @@ -96,34 +89,15 @@ pub mod eth_events { nonce: nonce.parse_uint256()?, transfers: transfers .parse_transfer_to_namada_array()?, - valid_transfers_map: valid_map, } } - RawEvents::Governance(GovernanceEvents::NewContractFilter( - NewContractFilter { name: _, addr: _ }, - )) => { - return Err(Error::NotInUse( - NewContractFilter::name().into(), - )); - } - RawEvents::Governance( - GovernanceEvents::UpgradedContractFilter( - UpgradedContractFilter { name: _, addr: _ }, - ), - ) => { - return Err(Error::NotInUse( - UpgradedContractFilter::name().into(), - )); - } - RawEvents::Governance( - GovernanceEvents::ValidatorSetUpdateFilter( - ValidatorSetUpdateFilter { - validator_set_nonce, - bridge_validator_set_hash, - governance_validator_set_hash, - }, - ), - ) => EthereumEvent::ValidatorSetUpdate { + RawEvents::Bridge(BridgeEvents::ValidatorSetUpdateFilter( + ValidatorSetUpdateFilter { + validator_set_nonce, + bridge_validator_set_hash, + governance_validator_set_hash, + }, + )) => EthereumEvent::ValidatorSetUpdate { nonce: validator_set_nonce.into(), bridge_validator_hash: bridge_validator_set_hash .parse_keccak()?, @@ -253,7 +227,7 @@ pub mod eth_events { } } - impl Parse for Vec { + impl Parse for Vec { fn parse_transfer_to_namada_array( self, ) -> Result> { @@ -264,7 +238,7 @@ pub mod eth_events { } } - impl Parse for ethereum_structs::NamadaTransfer { + impl Parse for ethereum_structs::ChainTransfer { fn parse_transfer_to_namada(self) -> Result { let asset = self.from.parse_eth_address()?; let amount = self.amount.parse_amount()?; @@ -293,7 +267,7 @@ pub mod eth_events { let asset = self.from.parse_eth_address()?; let receiver = self.to.parse_eth_address()?; let amount = self.amount.parse_amount()?; - let checksum = self.namada_data_digest.parse_hash()?; + let checksum = self.data_digest.parse_hash()?; Ok(TransferToEthereum { asset, amount, @@ -321,9 +295,10 @@ pub mod eth_events { use assert_matches::assert_matches; use ethabi::ethereum_types::{H160, 
U256}; use ethbridge_events::{ - TRANSFER_TO_ERC_CODEC, TRANSFER_TO_NAMADA_CODEC, + TRANSFER_TO_CHAIN_CODEC, TRANSFER_TO_ERC_CODEC, VALIDATOR_SET_UPDATE_CODEC, }; + use namada::eth_bridge::ethers::contract::EthEvent; use super::*; use crate::node::ledger::ethereum_oracle::test_tools::event_log::GetLog; @@ -339,10 +314,9 @@ pub mod eth_events { let lower_than_min_confirmations = 5u64; let (codec, event) = ( - TRANSFER_TO_NAMADA_CODEC, - TransferToNamadaFilter { + TRANSFER_TO_CHAIN_CODEC, + TransferToChainFilter { transfers: vec![], - valid_map: vec![], nonce: 0.into(), confirmations: lower_than_min_confirmations.into(), }, @@ -363,42 +337,38 @@ pub mod eth_events { Ok(()) } - /// Test decoding a [`TransferToNamadaFilter`] Ethereum event. + /// Test decoding a "Transfer to Namada" Ethereum event. #[test] fn test_transfer_to_namada_decode() { let data = vec![ + 170, 156, 23, 249, 166, 216, 156, 37, 67, 204, 150, 161, 103, + 163, 161, 122, 243, 66, 109, 149, 141, 194, 27, 80, 238, 109, + 40, 128, 254, 233, 54, 163, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 160, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 189, - 178, 49, 86, 120, 175, 236, 179, 103, 240, 50, 217, 63, 100, - 47, 100, 24, 10, 163, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84, - 97, 116, 101, 115, 116, 49, 118, 52, 101, 104, 103, 119, 51, - 54, 120, 117, 117, 110, 119, 100, 54, 57, 56, 57, 112, 114, - 119, 100, 102, 107, 120, 113, 109, 110, 118, 115, 102, 106, - 120, 115, 54, 110, 118, 118, 54, 120, 120, 117, 99, 114, 115, - 51, 102, 51, 120, 99, 109, 110, 115, 51, 102, 99, 120, 100, - 122, 114, 118, 118, 122, 57, 120, 118, 101, 114, 122, 118, 122, - 114, 53, 54, 108, 101, 56, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 189, 178, 49, 86, + 120, 175, 236, 179, 103, 240, 50, 217, 63, 100, 47, 100, 24, + 10, 163, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, + 0, 0, 0, 84, 97, 116, 101, 115, 116, 49, 118, 52, 101, 104, + 103, 119, 51, 54, 120, 117, 117, 110, 119, 100, 54, 57, 56, 57, + 112, 114, 119, 100, 102, 107, 120, 113, 109, 110, 118, 115, + 102, 106, 120, 115, 54, 110, 118, 118, 54, 120, 120, 117, 99, + 114, 115, 51, 102, 51, 120, 99, 109, 110, 115, 51, 102, 99, + 120, 100, 122, 114, 118, 118, 122, 57, 120, 118, 101, 114, 122, + 118, 122, 114, 53, 54, 108, 101, 56, 102, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, ]; - let raw: TransferToNamadaFilter = TRANSFER_TO_NAMADA_CODEC + let raw: TransferToChainFilter = TRANSFER_TO_CHAIN_CODEC .decode(ðabi::RawLog { - topics: vec![TransferToNamadaFilter::signature()], + topics: vec![TransferToChainFilter::signature()], data, }) .expect("Test 
failed") @@ -407,7 +377,7 @@ pub mod eth_events { assert_eq!( raw.transfers, - vec![ethereum_structs::NamadaTransfer { + vec![ethereum_structs::ChainTransfer { amount: 100u64.into(), from: ethabi::Address::from_str("0x5FbDB2315678afecb367f032d93F642f64180aa3").unwrap(), to: "atest1v4ehgw36xuunwd6989prwdfkxqmnvsfjxs6nvv6xxucrs3f3xcmns3fcxdzrvvz9xverzvzr56le8f".into(), @@ -425,10 +395,9 @@ pub mod eth_events { let higher_than_min_confirmations = 200u64; let (codec, event) = ( - TRANSFER_TO_NAMADA_CODEC, - TransferToNamadaFilter { + TRANSFER_TO_CHAIN_CODEC, + TransferToChainFilter { transfers: vec![], - valid_map: vec![], nonce: 0u64.into(), confirmations: higher_than_min_confirmations.into(), }, @@ -501,16 +470,15 @@ pub mod eth_events { let address: String = "atest1v4ehgw36gep5ysecxq6nyv3jg3zygv3e89qn2vp48pryxsf4xpznvve5gvmy23fs89pryvf5a6ht90" .into(); - let nam_transfers = TransferToNamadaFilter { + let nam_transfers = TransferToChainFilter { transfers: vec![ - ethereum_structs::NamadaTransfer { + ethereum_structs::ChainTransfer { amount: 0u64.into(), from: H160([0; 20]), to: address.clone(), }; 2 ], - valid_map: vec![true; 2], nonce: 0u64.into(), confirmations: 0u64.into(), }; @@ -520,11 +488,10 @@ pub mod eth_events { from: H160([1; 20]), to: H160([2; 20]), amount: 0u64.into(), - namada_data_digest: [0; 32], + data_digest: [0; 32], }; 2 ], - valid_map: vec![true; 2], nonce: 0u64.into(), relayer_address: address, }; @@ -535,8 +502,8 @@ pub mod eth_events { }; assert_eq!( { - let decoded: TransferToNamadaFilter = - TRANSFER_TO_NAMADA_CODEC + let decoded: TransferToChainFilter = + TRANSFER_TO_CHAIN_CODEC .decode(&nam_transfers.clone().get_log()) .expect("Test failed") .try_into() diff --git a/apps/src/lib/node/ledger/ethereum_oracle/mod.rs b/apps/src/lib/node/ledger/ethereum_oracle/mod.rs index a8db91831c8..6980778c075 100644 --- a/apps/src/lib/node/ledger/ethereum_oracle/mod.rs +++ b/apps/src/lib/node/ledger/ethereum_oracle/mod.rs @@ -444,7 +444,6 @@ async fn 
process( let sig = codec.event_signature(); let addr: Address = match codec.kind() { EventKind::Bridge => config.bridge_contract.into(), - EventKind::Governance => config.governance_contract.into(), }; tracing::debug!( ?block_to_process, @@ -562,9 +561,7 @@ pub mod last_processed_block { mod test_oracle { use std::num::NonZeroU64; - use ethbridge_bridge_events::{ - TransferToErcFilter, TransferToNamadaFilter, - }; + use ethbridge_bridge_events::{TransferToChainFilter, TransferToErcFilter}; use namada::eth_bridge::ethers::types::H160; use namada::eth_bridge::structs::Erc20Transfer; use namada::types::address::testing::gen_established_address; @@ -710,16 +707,15 @@ mod test_oracle { // Increase height above the configured minimum confirmations controller.apply_cmd(TestCmd::NewHeight(min_confirmations.into())); - let new_event = TransferToNamadaFilter { + let new_event = TransferToChainFilter { nonce: 0.into(), transfers: vec![], - valid_map: vec![], confirmations: 100.into(), } .get_log(); let (sender, _) = channel(); controller.apply_cmd(TestCmd::NewEvent { - event_type: event_signature::(), + event_type: event_signature::(), log: new_event, height: 101, seen: sender, @@ -760,16 +756,15 @@ mod test_oracle { // set the oracle to be unresponsive controller.apply_cmd(TestCmd::Unresponsive); // send a new event to the oracle - let new_event = TransferToNamadaFilter { + let new_event = TransferToChainFilter { nonce: 0.into(), transfers: vec![], - valid_map: vec![], confirmations: 100.into(), } .get_log(); let (sender, mut seen) = channel(); controller.apply_cmd(TestCmd::NewEvent { - event_type: event_signature::(), + event_type: event_signature::(), log: new_event, height: 150, seen: sender, @@ -815,10 +810,9 @@ mod test_oracle { controller.apply_cmd(TestCmd::NewHeight(min_confirmations.into())); // confirmed after 100 blocks - let first_event = TransferToNamadaFilter { + let first_event = TransferToChainFilter { nonce: 0.into(), transfers: vec![], - valid_map: vec![], 
confirmations: 100.into(), } .get_log(); @@ -830,9 +824,8 @@ mod test_oracle { amount: 0.into(), from: H160([0; 20]), to: H160([1; 20]), - namada_data_digest: [0; 32], + data_digest: [0; 32], }], - valid_map: vec![true], relayer_address: gas_payer.to_string(), nonce: 0.into(), } @@ -848,7 +841,7 @@ mod test_oracle { }); let (sender, _recv) = channel(); controller.apply_cmd(TestCmd::NewEvent { - event_type: event_signature::(), + event_type: event_signature::(), log: first_event, height: 100, seen: sender, @@ -859,15 +852,9 @@ mod test_oracle { controller.apply_cmd(TestCmd::NewHeight(Uint256::from(200u32))); // check the correct event is received let event = eth_recv.recv().await.expect("Test failed"); - if let EthereumEvent::TransfersToNamada { - nonce, - transfers, - valid_transfers_map: valid_map, - } = event - { + if let EthereumEvent::TransfersToNamada { nonce, transfers } = event { assert_eq!(nonce, 0.into()); assert!(transfers.is_empty()); - assert!(valid_map.is_empty()); } else { panic!("Test failed, {:?}", event); } diff --git a/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs b/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs index 672a1546f8f..9a2454be17b 100644 --- a/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs +++ b/apps/src/lib/node/ledger/ethereum_oracle/test_tools/mod.rs @@ -6,9 +6,8 @@ pub mod event_log { // p.s.: https://medium.com/mycrypto/understanding-event-logs-on-the-ethereum-blockchain-f4ae7ba50378 use ethbridge_bridge_events::{ - TransferToErcFilter, TransferToNamadaFilter, + TransferToChainFilter, TransferToErcFilter, ValidatorSetUpdateFilter, }; - use ethbridge_governance_events::ValidatorSetUpdateFilter; use namada::eth_bridge::ethers::abi::AbiEncode; use namada::eth_bridge::ethers::contract::EthEvent; @@ -18,7 +17,7 @@ pub mod event_log { fn get_log(self) -> ethabi::RawLog; } - impl GetLog for TransferToNamadaFilter { + impl GetLog for TransferToChainFilter { fn get_log(self) -> ethabi::RawLog { 
ethabi::RawLog { topics: vec![Self::signature()], @@ -35,8 +34,7 @@ pub mod event_log { self.nonce.to_big_endian(&mut buf); ethabi::ethereum_types::H256(buf) }], - data: (self.transfers, self.valid_map, self.relayer_address) - .encode(), + data: (self.transfers, self.relayer_address).encode(), } } } diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 57ed9269e0d..53252065f11 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -248,9 +248,6 @@ where .flatten() }) .flatten(); - #[cfg(not(feature = "mainnet"))] - let has_valid_pow = - self.invalidate_pow_solution_if_valid(wrapper); if let Err(msg) = protocol::charge_fee( wrapper, masp_transaction, @@ -260,8 +257,6 @@ where &mut self.vp_wasm_cache, &mut self.tx_wasm_cache, ), - #[cfg(not(feature = "mainnet"))] - has_valid_pow, Some(&native_block_proposer_address), &mut BTreeSet::default(), ) { @@ -281,165 +276,146 @@ where continue; } - let ( - mut tx_event, - tx_unsigned_hash, - mut tx_gas_meter, - has_valid_pow, - wrapper, - ) = match &tx_header.tx_type { - TxType::Wrapper(wrapper) => { - stats.increment_wrapper_txs(); - let tx_event = Event::new_tx_event(&tx, height.0); - #[cfg(not(feature = "mainnet"))] - let has_valid_pow = - self.invalidate_pow_solution_if_valid(wrapper); - - let gas_meter = TxGasMeter::new(wrapper.gas_limit); - - ( - tx_event, - None, - gas_meter, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, - Some(tx.clone()), - ) - } - TxType::Decrypted(inner) => { - // We remove the corresponding wrapper tx from the queue - let mut tx_in_queue = self - .wl_storage - .storage - .tx_queue - .pop() - .expect("Missing wrapper tx in queue"); - let mut event = Event::new_tx_event(&tx, height.0); - - match inner { - DecryptedTx::Decrypted { has_valid_pow: _ } => { - if let Some(code_sec) = tx - .get_section(tx.code_sechash()) - .and_then(|x| Section::code_sec(x.as_ref())) - { 
- stats.increment_tx_type( - code_sec.code.hash().to_string(), + let (mut tx_event, tx_unsigned_hash, mut tx_gas_meter, wrapper) = + match &tx_header.tx_type { + TxType::Wrapper(wrapper) => { + stats.increment_wrapper_txs(); + let tx_event = Event::new_tx_event(&tx, height.0); + let gas_meter = TxGasMeter::new(wrapper.gas_limit); + (tx_event, None, gas_meter, Some(tx.clone())) + } + TxType::Decrypted(inner) => { + // We remove the corresponding wrapper tx from the queue + let mut tx_in_queue = self + .wl_storage + .storage + .tx_queue + .pop() + .expect("Missing wrapper tx in queue"); + let mut event = Event::new_tx_event(&tx, height.0); + + match inner { + DecryptedTx::Decrypted => { + if let Some(code_sec) = tx + .get_section(tx.code_sechash()) + .and_then(|x| Section::code_sec(x.as_ref())) + { + stats.increment_tx_type( + code_sec.code.hash().to_string(), + ); + } + } + DecryptedTx::Undecryptable => { + tracing::info!( + "Tx with hash {} was un-decryptable", + tx_in_queue.tx.header_hash() ); + event["info"] = + "Transaction is invalid.".into(); + event["log"] = "Transaction could not be \ + decrypted." 
+ .into(); + event["code"] = + ErrorCodes::Undecryptable.into(); + continue; } } - DecryptedTx::Undecryptable => { - tracing::info!( - "Tx with hash {} was un-decryptable", - tx_in_queue.tx.header_hash() - ); - event["info"] = "Transaction is invalid.".into(); - event["log"] = - "Transaction could not be decrypted.".into(); - event["code"] = ErrorCodes::Undecryptable.into(); - continue; - } - } - ( - event, - Some( - tx_in_queue - .tx - .update_header(TxType::Raw) - .header_hash(), + ( + event, + Some( + tx_in_queue + .tx + .update_header(TxType::Raw) + .header_hash(), + ), + TxGasMeter::new_from_sub_limit(tx_in_queue.gas), + None, + ) + } + TxType::Raw => { + tracing::error!( + "Internal logic error: FinalizeBlock received a \ + TxType::Raw transaction" + ); + continue; + } + TxType::Protocol(protocol_tx) => match protocol_tx.tx { + ProtocolTxType::BridgePoolVext + | ProtocolTxType::BridgePool + | ProtocolTxType::ValSetUpdateVext + | ProtocolTxType::ValidatorSetUpdate => ( + Event::new_tx_event(&tx, height.0), + None, + TxGasMeter::new_from_sub_limit(0.into()), + None, ), - TxGasMeter::new_from_sub_limit(tx_in_queue.gas), - #[cfg(not(feature = "mainnet"))] - false, - None, - ) - } - TxType::Raw => { - tracing::error!( - "Internal logic error: FinalizeBlock received a \ - TxType::Raw transaction" - ); - continue; - } - TxType::Protocol(protocol_tx) => match protocol_tx.tx { - ProtocolTxType::BridgePoolVext - | ProtocolTxType::BridgePool - | ProtocolTxType::ValSetUpdateVext - | ProtocolTxType::ValidatorSetUpdate => ( - Event::new_tx_event(&tx, height.0), - None, - TxGasMeter::new_from_sub_limit(0.into()), - #[cfg(not(feature = "mainnet"))] - false, - None, - ), - ProtocolTxType::EthEventsVext => { - let ext = + ProtocolTxType::EthEventsVext => { + let ext = ethereum_tx_data_variants::EthEventsVext::try_from( &tx, ) .unwrap(); - if self - .mode - .get_validator_address() - .map(|validator| { - validator == &ext.data.validator_addr - }) - .unwrap_or(false) - { - for 
event in ext.data.ethereum_events.iter() { - self.mode.dequeue_eth_event(event); + if self + .mode + .get_validator_address() + .map(|validator| { + validator == &ext.data.validator_addr + }) + .unwrap_or(false) + { + for event in ext.data.ethereum_events.iter() { + self.mode.dequeue_eth_event(event); + } } + ( + Event::new_tx_event(&tx, height.0), + None, + TxGasMeter::new_from_sub_limit(0.into()), + None, + ) } - ( - Event::new_tx_event(&tx, height.0), - None, - TxGasMeter::new_from_sub_limit(0.into()), - #[cfg(not(feature = "mainnet"))] - false, - None, - ) - } - ProtocolTxType::EthereumEvents => { - let digest = + ProtocolTxType::EthereumEvents => { + let digest = ethereum_tx_data_variants::EthereumEvents::try_from( &tx, ).unwrap(); - if let Some(address) = - self.mode.get_validator_address().cloned() - { - let this_signer = &( - address, - self.wl_storage.storage.get_last_block_height(), - ); - for MultiSignedEthEvent { event, signers } in - &digest.events + if let Some(address) = + self.mode.get_validator_address().cloned() { - if signers.contains(this_signer) { - self.mode.dequeue_eth_event(event); + let this_signer = &( + address, + self.wl_storage + .storage + .get_last_block_height(), + ); + for MultiSignedEthEvent { event, signers } in + &digest.events + { + if signers.contains(this_signer) { + self.mode.dequeue_eth_event(event); + } } } + ( + Event::new_tx_event(&tx, height.0), + None, + TxGasMeter::new_from_sub_limit(0.into()), + None, + ) } - ( - Event::new_tx_event(&tx, height.0), - None, - TxGasMeter::new_from_sub_limit(0.into()), - #[cfg(not(feature = "mainnet"))] - false, - None, - ) - } - ref protocol_tx_type => { - tracing::error!( - ?protocol_tx_type, - "Internal logic error: FinalizeBlock received an \ - unsupported TxType::Protocol transaction: {:?}", - protocol_tx - ); - continue; - } - }, - }; + ref protocol_tx_type => { + tracing::error!( + ?protocol_tx_type, + "Internal logic error: FinalizeBlock received \ + an unsupported 
TxType::Protocol transaction: \ + {:?}", + protocol_tx + ); + continue; + } + }, + }; match protocol::dispatch_tx( tx, @@ -454,8 +430,6 @@ where &mut self.vp_wasm_cache, &mut self.tx_wasm_cache, Some(&native_block_proposer_address), - #[cfg(not(feature = "mainnet"))] - has_valid_pow, ) .map_err(Error::TxApply) { @@ -470,8 +444,6 @@ where self.wl_storage.storage.tx_queue.push(TxInQueue { tx: wrapper.expect("Missing expected wrapper"), gas: tx_gas_meter.get_available_gas(), - #[cfg(not(feature = "mainnet"))] - has_valid_pow, }); } else { tracing::trace!( @@ -1145,8 +1117,6 @@ mod test_finalize_block { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper_tx.header.chain_id = shell.chain_id.clone(); @@ -1156,7 +1126,8 @@ mod test_finalize_block { )); wrapper_tx.add_section(Section::Signature(Signature::new( wrapper_tx.sechashes(), - keypair, + [(0, keypair.clone())].into_iter().collect(), + None, ))); let tx = wrapper_tx.to_bytes(); ( @@ -1187,8 +1158,6 @@ mod test_finalize_block { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); outer_tx.header.chain_id = shell.chain_id.clone(); @@ -1201,10 +1170,7 @@ mod test_finalize_block { .checked_sub(Gas::from(outer_tx.to_bytes().len() as u64)) .unwrap(); shell.enqueue_tx(outer_tx.clone(), gas_limit); - outer_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - })); + outer_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); outer_tx.decrypt(::G2Affine::prime_subgroup_generator()) .expect("Test failed"); ProcessedTx { @@ -1312,8 +1278,6 @@ mod test_finalize_block { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); outer_tx.header.chain_id = shell.chain_id.clone(); @@ -1327,10 +1291,7 @@ mod test_finalize_block { .unwrap(); shell.enqueue_tx(outer_tx.clone(), 
gas_limit); - outer_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - })); + outer_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); let processed_tx = ProcessedTx { tx: outer_tx.to_bytes(), result: TxResult { @@ -1371,8 +1332,6 @@ mod test_finalize_block { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); let processed_tx = ProcessedTx { @@ -1538,9 +1497,9 @@ mod test_finalize_block { .clone(); // ---- the ledger receives a new Ethereum event - let event = EthereumEvent::NewContract { - name: "Test".to_string(), - address: EthAddress([0; 20]), + let event = EthereumEvent::TransfersToNamada { + nonce: 0u64.into(), + transfers: vec![], }; tokio_test::block_on(oracle.send(event.clone())).expect("Test failed"); let [queued_event]: [EthereumEvent; 1] = @@ -1616,9 +1575,9 @@ mod test_finalize_block { .clone(); // ---- the ledger receives a new Ethereum event - let event = EthereumEvent::NewContract { - name: "Test".to_string(), - address: EthAddress([0; 20]), + let event = EthereumEvent::TransfersToNamada { + nonce: 0u64.into(), + transfers: vec![], }; tokio_test::block_on(oracle.send(event.clone())).expect("Test failed"); let [queued_event]: [EthereumEvent; 1] = @@ -1736,17 +1695,8 @@ mod test_finalize_block { #[test] /// Test that adding a new erc20 transfer to the bridge pool - /// increments the pool's nonce, whether only invalid transfers - /// were relayed or not. + /// increments the pool's nonce. fn test_bp_nonce_is_incremented() { - test_bp_nonce_is_incremented_aux(false); - test_bp_nonce_is_incremented_aux(true); - } - - /// Helper function to [`test_bp_nonce_is_incremented`]. - /// - /// Sets the validity of the transfer on Ethereum's side. 
- fn test_bp_nonce_is_incremented_aux(valid_transfer: bool) { use crate::node::ledger::shell::address::nam; test_bp(|shell: &mut TestShell| { let asset = EthAddress([0xff; 20]); @@ -1812,7 +1762,6 @@ mod test_finalize_block { let ethereum_event = EthereumEvent::TransfersToEthereum { nonce: 0u64.into(), transfers: vec![transfer], - valid_transfers_map: vec![valid_transfer], relayer: bertha, }; let (protocol_key, _, _) = @@ -2400,8 +2349,6 @@ mod test_finalize_block { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper_tx.header.chain_id = shell.chain_id.clone(); @@ -2411,10 +2358,7 @@ mod test_finalize_block { )); let mut decrypted_tx = wrapper_tx.clone(); - decrypted_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - })); + decrypted_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); // Write inner hash in storage let inner_hash_key = replay_protection::get_replay_protection_key( @@ -2478,8 +2422,6 @@ mod test_finalize_block { keypair.ref_to(), Epoch(0), 0.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2489,7 +2431,8 @@ mod test_finalize_block { )); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let wrapper_hash_key = replay_protection::get_replay_protection_key( @@ -2551,8 +2494,6 @@ mod test_finalize_block { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2562,7 +2503,8 @@ mod test_finalize_block { )); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair.clone())].into_iter().collect(), + None, ))); let processed_tx = ProcessedTx { @@ -2635,8 +2577,6 @@ mod test_finalize_block { 
crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), 5_000_000.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2646,7 +2586,10 @@ mod test_finalize_block { )); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); let fee_amount = wrapper.header().wrapper().unwrap().get_tx_fee().unwrap(); diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index 7e9d72eac37..cc79c9a9f05 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -265,10 +265,7 @@ where let pending_execution_key = gov_storage::get_proposal_execution_key(id); shell.wl_storage.write(&pending_execution_key, ())?; - let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - })); + let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); tx.header.chain_id = shell.chain_id.clone(); tx.set_data(Data::new(encode(&id))); tx.set_code(Code::new(code)); @@ -284,8 +281,6 @@ where &mut shell.vp_wasm_cache, &mut shell.tx_wasm_cache, None, - #[cfg(not(feature = "mainnet"))] - false, ); shell .wl_storage diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index b650e2d7309..e25e72c8d1c 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -2,8 +2,6 @@ use std::collections::HashMap; use std::hash::Hash; -#[cfg(not(feature = "mainnet"))] -use namada::core::ledger::testnet_pow; use namada::ledger::eth_bridge::EthBridgeStatus; use namada::ledger::parameters::{self, Parameters}; use namada::ledger::pos::{staking_token_address, PosParams}; @@ -18,7 +16,6 @@ use namada::types::dec::Dec; use 
namada::types::hash::Hash as CodeHash; use namada::types::key::*; use namada::types::time::{DateTimeUtc, TimeZone, Utc}; -use namada::types::token; use super::*; use crate::facade::tendermint_proto::google::protobuf; @@ -100,24 +97,6 @@ where fee_unshielding_gas_limit, fee_unshielding_descriptions_limit, } = genesis.parameters; - #[cfg(not(feature = "mainnet"))] - // Try to find a faucet account - let faucet_account = { - genesis.established_accounts.iter().find_map( - |genesis::EstablishedAccount { - address, - vp_code_path, - .. - }| { - if vp_code_path == "vp_testnet_faucet.wasm" { - Some(address.clone()) - } else { - None - } - }, - ) - }; - // Store wasm codes into storage let checksums = wasm_loader::Checksums::read_checksums(&self.wasm_dir); for (name, full_name) in checksums.0.iter() { @@ -198,8 +177,6 @@ where pos_gain_d, staked_ratio, pos_inflation_amount, - #[cfg(not(feature = "mainnet"))] - faucet_account, gas_cost, fee_unshielding_gas_limit, fee_unshielding_descriptions_limit, @@ -242,8 +219,6 @@ where // Initialize genesis established accounts self.initialize_established_accounts( - genesis.faucet_pow_difficulty, - genesis.faucet_withdrawal_limit, genesis.established_accounts, &implicit_vp_code_path, )?; @@ -272,8 +247,6 @@ where /// Initialize genesis established accounts fn initialize_established_accounts( &mut self, - faucet_pow_difficulty: Option, - faucet_withdrawal_limit: Option, accounts: Vec, implicit_vp_code_path: &str, ) -> Result<()> { @@ -320,26 +293,8 @@ where for (key, value) in storage { self.wl_storage.write_bytes(&key, value).unwrap(); } - - // When using a faucet WASM, initialize its PoW challenge storage - #[cfg(not(feature = "mainnet"))] - if vp_code_path == "vp_testnet_faucet.wasm" { - let difficulty = faucet_pow_difficulty.unwrap_or_default(); - // withdrawal limit defaults to 1000 NAM when not set - let withdrawal_limit = - faucet_withdrawal_limit.unwrap_or_else(|| { - token::Amount::native_whole(1_000).into() - }); - - 
testnet_pow::init_faucet_storage( - &mut self.wl_storage, - &address, - difficulty, - withdrawal_limit, - ) - .expect("Couldn't init faucet storage") - } } + Ok(()) } diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 46a985b8fcf..778ceffd01c 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -28,6 +28,7 @@ use std::rc::Rc; use borsh::{BorshDeserialize, BorshSerialize}; use masp_primitives::transaction::Transaction; +use namada::core::hints; use namada::core::ledger::eth_bridge; use namada::ledger::eth_bridge::{EthBridgeQueries, EthereumOracleConfig}; use namada::ledger::events::log::EventLog; @@ -38,7 +39,8 @@ use namada::ledger::pos::namada_proof_of_stake::types::{ ConsensusValidator, ValidatorSetUpdate, }; use namada::ledger::protocol::{ - apply_wasm_tx, get_transfer_hash_from_storage, ShellParams, + apply_wasm_tx, get_fee_unshielding_transaction, + get_transfer_hash_from_storage, ShellParams, }; use namada::ledger::storage::write_log::WriteLog; use namada::ledger::storage::{ @@ -52,10 +54,11 @@ use namada::proto::{self, Section, Tx}; use namada::types::address::Address; use namada::types::chain::ChainId; use namada::types::ethereum_events::EthereumEvent; -use namada::types::internal::TxInQueue; +use namada::types::internal::{ExpiredTx, TxInQueue}; use namada::types::key::*; use namada::types::storage::{BlockHeight, Key, TxIndex}; use namada::types::time::DateTimeUtc; +use namada::types::transaction::protocol::EthereumTxData; use namada::types::transaction::{ hash_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx, EllipticCurve, PairingEngine, TxType, WrapperTx, @@ -814,9 +817,6 @@ where ) }); - // NOTE: the oracle isn't started through governance votes, so we don't - // check to see if we need to start it after epoch transitions - let root = self.wl_storage.storage.merkle_root(); tracing::info!( "Committed block hash: {}, height: {}", @@ -825,11 +825,13 @@ where 
); response.data = root.0.to_vec(); + // validator specific actions if let ShellMode::Validator { eth_oracle: Some(eth_oracle), .. } = &self.mode { + // update the oracle's last processed eth block let last_processed_block = eth_oracle .last_processed_block_receiver .borrow() @@ -849,32 +851,76 @@ where blocks" ), } + + // broadcast any queued txs + self.broadcast_queued_txs(); } - #[cfg(not(feature = "abcipp"))] - { - use crate::node::ledger::shell::vote_extensions::iter_protocol_txs; + response + } - if let ShellMode::Validator { .. } = &self.mode { - let ext = self.craft_extension(); + /// Empties all the ledger's queues of transactions to be broadcasted + /// via CometBFT's P2P network. + #[inline] + fn broadcast_queued_txs(&mut self) { + self.broadcast_protocol_txs(); + self.broadcast_expired_txs(); + } - let protocol_key = self - .mode - .get_protocol_key() - .expect("Validators should have protocol keys"); + /// Broadcast any pending protocol transactions. + fn broadcast_protocol_txs(&mut self) { + use crate::node::ledger::shell::vote_extensions::iter_protocol_txs; - let protocol_txs = iter_protocol_txs(ext).map(|protocol_tx| { - protocol_tx - .sign(protocol_key, self.chain_id.clone()) - .to_bytes() - }); + let ext = self.craft_extension(); - for tx in protocol_txs { - self.mode.broadcast(tx); - } - } + let protocol_key = self + .mode + .get_protocol_key() + .expect("Validators should have protocol keys"); + + let protocol_txs = iter_protocol_txs(ext).map(|protocol_tx| { + protocol_tx + .sign(protocol_key, self.chain_id.clone()) + .to_bytes() + }); + + for tx in protocol_txs { + self.mode.broadcast(tx); + } + } + + /// Broadcast any expired transactions. 
+ fn broadcast_expired_txs(&mut self) { + let eth_events = { + let mut events: Vec<_> = self + .wl_storage + .storage + .expired_txs_queue + .drain() + .map(|expired_tx| match expired_tx { + ExpiredTx::EthereumEvent(event) => event, + }) + .collect(); + events.sort(); + events + }; + if hints::likely(eth_events.is_empty()) { + // more often than not, there won't by any expired + // Ethereum events to retransmit + return; + } + if let Some(vote_extension) = self.sign_ethereum_events(eth_events) { + let protocol_key = self + .mode + .get_protocol_key() + .expect("Validators should have protocol keys"); + + let signed_tx = EthereumTxData::EthEventsVext(vote_extension) + .sign(protocol_key, self.chain_id.clone()) + .to_bytes(); + + self.mode.broadcast(signed_tx); } - response } /// Checks that neither the wrapper nor the inner transaction have already @@ -988,7 +1034,6 @@ where let config = namada::eth_bridge::oracle::config::Config { min_confirmations: config.min_confirmations.into(), bridge_contract: config.contracts.bridge.address, - governance_contract: config.contracts.governance.address, start_block, }; tracing::info!( @@ -1262,20 +1307,10 @@ where return response; } - let fee_unshield = wrapper - .unshield_section_hash - .and_then(|ref hash| tx.get_section(hash)) - .and_then(|section| { - if let Section::MaspTx(transaction) = section.as_ref() { - Some(transaction.to_owned()) - } else { - None - } - }); // Validate wrapper fees if let Err(e) = self.wrapper_fee_check( &wrapper, - fee_unshield, + get_fee_unshielding_transaction(&tx, &wrapper), &mut TempWlStorage::new(&self.wl_storage.storage), &mut self.vp_wasm_cache.clone(), &mut self.tx_wasm_cache.clone(), @@ -1307,56 +1342,6 @@ where response } - #[cfg(not(feature = "mainnet"))] - /// Check if the tx has a valid PoW solution. Unlike - /// `apply_pow_solution_if_valid`, this won't invalidate the solution. 
- fn has_valid_pow_solution( - &self, - tx: &namada::types::transaction::WrapperTx, - ) -> bool { - if let Some(solution) = &tx.pow_solution { - if let Some(faucet_address) = - namada::ledger::parameters::read_faucet_account_parameter( - &self.wl_storage, - ) - .expect("Must be able to read faucet account parameter") - { - let source = Address::from(&tx.pk); - return solution - .validate(&self.wl_storage, &faucet_address, source) - .expect("Must be able to validate PoW solutions"); - } - } - false - } - - #[cfg(not(feature = "mainnet"))] - /// Check if the tx has a valid PoW solution and if so invalidate it to - /// prevent replay. - fn invalidate_pow_solution_if_valid( - &mut self, - tx: &namada::types::transaction::WrapperTx, - ) -> bool { - if let Some(solution) = &tx.pow_solution { - if let Some(faucet_address) = - namada::ledger::parameters::read_faucet_account_parameter( - &self.wl_storage, - ) - .expect("Must be able to read faucet account parameter") - { - let source = Address::from(&tx.pk); - return solution - .invalidate_if_valid( - &mut self.wl_storage, - &faucet_address, - &source, - ) - .expect("Must be able to validate PoW solutions"); - } - } - false - } - /// Check that the Wrapper's signer has enough funds to pay fees. 
If a block /// proposer is provided, updates the balance of the fee payer #[allow(clippy::too_many_arguments)] @@ -1437,8 +1422,6 @@ where vp_wasm_cache, tx_wasm_cache, ), - #[cfg(not(feature = "mainnet"))] - false, ) { Ok(result) => { if !result.is_accepted() { @@ -1462,19 +1445,10 @@ where } let result = match block_proposer { - Some(proposer) => protocol::transfer_fee( - temp_wl_storage, - proposer, - #[cfg(not(feature = "mainnet"))] - self.has_valid_pow_solution(wrapper), - wrapper, - ), - None => protocol::check_fees( - temp_wl_storage, - #[cfg(not(feature = "mainnet"))] - self.has_valid_pow_solution(wrapper), - wrapper, - ), + Some(proposer) => { + protocol::transfer_fee(temp_wl_storage, proposer, wrapper) + } + None => protocol::check_fees(temp_wl_storage, wrapper), }; result.map_err(Error::TxApply) @@ -1836,8 +1810,6 @@ mod test_utils { self.shell.wl_storage.storage.tx_queue.push(TxInQueue { tx, gas: inner_tx_gas, - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, }); } @@ -2054,8 +2026,6 @@ mod test_utils { keypair.ref_to(), Epoch(0), 300_000.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2065,8 +2035,6 @@ mod test_utils { shell.wl_storage.storage.tx_queue.push(TxInQueue { tx: wrapper, gas: u64::MAX.into(), - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, }); // Artificially increase the block height so that chain // will read the new block when restarted @@ -2165,7 +2133,7 @@ mod abciplus_mempool_tests { use namada::types::key::RefTo; use namada::types::storage::BlockHeight; use namada::types::transaction::protocol::{ - EthereumTxData, ProtocolTx, ProtocolTxType, + ethereum_tx_data_variants, ProtocolTx, ProtocolTxType, }; use namada::types::vote_extensions::{bridge_pool_roots, ethereum_events}; @@ -2173,6 +2141,50 @@ mod abciplus_mempool_tests { use crate::node::ledger::shell::test_utils; use crate::wallet; + /// Check that broadcasting expired Ethereum events works + 
/// as expected. + #[test] + fn test_commit_broadcasts_expired_eth_events() { + let (mut shell, mut broadcaster_rx, _, _) = + test_utils::setup_at_height(5); + + // push expired events to queue + let ethereum_event_0 = EthereumEvent::TransfersToNamada { + nonce: 0u64.into(), + transfers: vec![], + }; + let ethereum_event_1 = EthereumEvent::TransfersToNamada { + nonce: 1u64.into(), + transfers: vec![], + }; + shell + .wl_storage + .storage + .expired_txs_queue + .push(ExpiredTx::EthereumEvent(ethereum_event_0.clone())); + shell + .wl_storage + .storage + .expired_txs_queue + .push(ExpiredTx::EthereumEvent(ethereum_event_1.clone())); + + // broadcast them + shell.broadcast_expired_txs(); + + // attempt to receive vote extension tx aggregating + // all expired events + let serialized_tx = broadcaster_rx.blocking_recv().unwrap(); + let tx = Tx::try_from(&serialized_tx[..]).unwrap(); + + // check data inside tx + let vote_extension = + ethereum_tx_data_variants::EthEventsVext::try_from(&tx).unwrap(); + assert_eq!( + vote_extension.data.ethereum_events, + vec![ethereum_event_0, ethereum_event_1] + ); + } + /// Test that we do not include protocol txs in the mempool, /// voting on ethereum events or signing bridge pool roots /// and nonces if the bridge is inactive. 
@@ -2189,7 +2201,6 @@ mod abciplus_mempool_tests { let ethereum_event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), transfers: vec![], - valid_transfers_map: vec![], }; let eth_vext = EthereumTxData::EthEventsVext( ethereum_events::Vext { @@ -2242,7 +2253,6 @@ mod abciplus_mempool_tests { let ethereum_event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), transfers: vec![], - valid_transfers_map: vec![], }; let ext = { let ext = ethereum_events::Vext { @@ -2276,7 +2286,6 @@ mod abciplus_mempool_tests { let ethereum_event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), transfers: vec![], - valid_transfers_map: vec![], }; let ext = { let ext = ethereum_events::Vext { @@ -2299,7 +2308,8 @@ mod abciplus_mempool_tests { tx.set_data(Data::new(ext.try_to_vec().expect("Test falied"))); tx.add_section(Section::Signature(Signature::new( tx.sechashes(), - &protocol_key, + [(0, protocol_key)].into_iter().collect(), + None, ))); tx } @@ -2310,7 +2320,7 @@ mod abciplus_mempool_tests { } #[cfg(test)] -mod test_mempool_validate { +mod tests { use namada::proof_of_stake::Epoch; use namada::proto::{Code, Data, Section, Signature, Tx}; use namada::types::transaction::{Fee, WrapperTx}; @@ -2336,8 +2346,6 @@ mod test_mempool_validate { keypair.ref_to(), Epoch(0), Default::default(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); unsigned_wrapper.header.chain_id = shell.chain_id.clone(); @@ -2374,8 +2382,6 @@ mod test_mempool_validate { keypair.ref_to(), Epoch(0), Default::default(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); invalid_wrapper.header.chain_id = shell.chain_id.clone(); @@ -2384,7 +2390,8 @@ mod test_mempool_validate { .set_data(Data::new("transaction data".as_bytes().to_owned())); invalid_wrapper.add_section(Section::Signature(Signature::new( invalid_wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); // we mount a malleability attack to try and remove the fee @@ -2443,8 +2450,6 @@ mod 
test_mempool_validate { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2452,7 +2457,8 @@ mod test_mempool_validate { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); // Write wrapper hash to storage @@ -2602,8 +2608,6 @@ mod test_mempool_validate { keypair.ref_to(), Epoch(0), (block_gas_limit + 1).into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2611,7 +2615,8 @@ mod test_mempool_validate { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let result = shell.mempool_validate( @@ -2636,8 +2641,6 @@ mod test_mempool_validate { keypair.ref_to(), Epoch(0), 0.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2645,7 +2648,8 @@ mod test_mempool_validate { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let result = shell.mempool_validate( @@ -2670,8 +2674,6 @@ mod test_mempool_validate { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2679,7 +2681,10 @@ mod test_mempool_validate { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, 
crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); let result = shell.mempool_validate( @@ -2704,8 +2709,6 @@ mod test_mempool_validate { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2713,7 +2716,10 @@ mod test_mempool_validate { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); let result = shell.mempool_validate( @@ -2737,8 +2743,6 @@ mod test_mempool_validate { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), 150_000.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2746,7 +2750,10 @@ mod test_mempool_validate { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); let result = shell.mempool_validate( @@ -2770,8 +2777,6 @@ mod test_mempool_validate { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2779,7 +2784,10 @@ mod test_mempool_validate { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); let result = shell.mempool_validate( diff --git 
a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index e8633940b9a..3687a6d39b4 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -5,9 +5,10 @@ use namada::core::ledger::gas::TxGasMeter; #[cfg(feature = "abcipp")] use namada::ledger::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use namada::ledger::pos::PosQueries; +use namada::ledger::protocol::get_fee_unshielding_transaction; use namada::ledger::storage::{DBIter, StorageHasher, TempWlStorage, DB}; use namada::proof_of_stake::find_validator_by_raw_hash; -use namada::proto::{Section, Tx}; +use namada::proto::Tx; use namada::types::address::Address; use namada::types::internal::TxInQueue; use namada::types::key::tm_raw_hash_to_string; @@ -249,19 +250,9 @@ where .map_err(|_| ())?; // Check fees - let fee_unshield = - wrapper.unshield_section_hash.and_then(|ref hash| { - tx.get_section(hash).and_then(|section| { - if let Section::MaspTx(transaction) = section.as_ref() { - Some(transaction.to_owned()) - } else { - None - } - }) - }); match self.wrapper_fee_check( &wrapper, - fee_unshield, + get_fee_unshielding_transaction(&tx, &wrapper), temp_wl_storage, vp_wasm_cache, tx_wasm_cache, @@ -300,17 +291,12 @@ where |TxInQueue { tx, gas: _, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, }| { let mut tx = tx.clone(); match tx.decrypt(privkey).ok() { Some(_) => { - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: *has_valid_pow, - })); + tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); tx }, // An absent or undecryptable inner_tx are both @@ -644,9 +630,7 @@ mod test_prepare_proposal { #[test] fn test_prepare_proposal_rejects_non_wrapper_tx() { let (shell, _recv, _, _) = test_utils::setup(); - let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted { - has_valid_pow: true, - })); + let mut tx = 
Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); tx.header.chain_id = shell.chain_id.clone(); let req = RequestPrepareProposal { txs: vec![tx.to_bytes()], @@ -672,8 +656,6 @@ mod test_prepare_proposal { keypair.ref_to(), Epoch(0), Default::default(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -804,7 +786,6 @@ mod test_prepare_proposal { let ethereum_event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), transfers: vec![], - valid_transfers_map: vec![], }; let signed_vote_extension = { let ev = ethereum_event; @@ -1005,7 +986,6 @@ mod test_prepare_proposal { let ethereum_event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), transfers: vec![], - valid_transfers_map: vec![], }; let signed_eth_ev_vote_extension = { let ext = ethereum_events::Vext { @@ -1124,8 +1104,6 @@ mod test_prepare_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); tx.header.chain_id = shell.chain_id.clone(); @@ -1135,7 +1113,8 @@ mod test_prepare_proposal { )); tx.add_section(Section::Signature(Signature::new( tx.sechashes(), - &keypair, + [(0, keypair.clone())].into_iter().collect(), + None, ))); let gas = Gas::from( @@ -1146,10 +1125,7 @@ mod test_prepare_proposal { shell.enqueue_tx(tx.clone(), gas); expected_wrapper.push(tx.clone()); req.txs.push(tx.to_bytes()); - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - })); + tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); expected_decrypted.push(tx.clone()); } // we extract the inner data from the txs for testing @@ -1199,8 +1175,6 @@ mod test_prepare_proposal { keypair.ref_to(), Epoch(0), Default::default(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -1208,7 +1182,8 @@ mod test_prepare_proposal { wrapper.set_data(Data::new("transaction 
data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); // Write wrapper hash to storage @@ -1253,8 +1228,6 @@ mod test_prepare_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -1262,7 +1235,8 @@ mod test_prepare_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let req = RequestPrepareProposal { @@ -1295,8 +1269,6 @@ mod test_prepare_proposal { keypair.ref_to(), Epoch(0), Default::default(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -1304,7 +1276,8 @@ mod test_prepare_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let inner_unsigned_hash = wrapper.clone().update_header(TxType::Raw).header_hash(); @@ -1350,8 +1323,6 @@ mod test_prepare_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -1362,7 +1333,8 @@ mod test_prepare_proposal { let mut new_wrapper = wrapper.clone(); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( @@ -1373,13 +1345,12 @@ mod test_prepare_proposal { keypair_2.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); new_wrapper.add_section(Section::Signature(Signature::new( 
wrapper.sechashes(), - &keypair_2, + [(0, keypair_2)].into_iter().collect(), + None, ))); let req = RequestPrepareProposal { @@ -1410,8 +1381,6 @@ mod test_prepare_proposal { keypair.ref_to(), Epoch(0), Default::default(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper_tx.header.chain_id = shell.chain_id.clone(); @@ -1421,7 +1390,8 @@ mod test_prepare_proposal { .set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper_tx.add_section(Section::Signature(Signature::new( wrapper_tx.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let time = DateTimeUtc::now(); @@ -1460,8 +1430,6 @@ mod test_prepare_proposal { keypair.ref_to(), Epoch(0), (block_gas_limit + 1).into(), - #[cfg(not(feature = "mainnet"))] - None, None, ); let mut wrapper_tx = Tx::from_type(TxType::Wrapper(Box::new(wrapper))); @@ -1471,7 +1439,8 @@ mod test_prepare_proposal { .set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper_tx.add_section(Section::Signature(Signature::new( wrapper_tx.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let req = RequestPrepareProposal { @@ -1500,8 +1469,6 @@ mod test_prepare_proposal { keypair.ref_to(), Epoch(0), 0.into(), - #[cfg(not(feature = "mainnet"))] - None, None, ); @@ -1512,7 +1479,8 @@ mod test_prepare_proposal { .set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper_tx.add_section(Section::Signature(Signature::new( wrapper_tx.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let req = RequestPrepareProposal { @@ -1540,8 +1508,6 @@ mod test_prepare_proposal { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, ); @@ -1552,7 +1518,10 @@ mod test_prepare_proposal { .set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper_tx.add_section(Section::Signature(Signature::new( wrapper_tx.sechashes(), - 
&crate::wallet::defaults::albert_keypair(), + [(0, crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); let req = RequestPrepareProposal { @@ -1580,8 +1549,6 @@ mod test_prepare_proposal { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, ); let mut wrapper_tx = Tx::from_type(TxType::Wrapper(Box::new(wrapper))); @@ -1591,7 +1558,10 @@ mod test_prepare_proposal { .set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper_tx.add_section(Section::Signature(Signature::new( wrapper_tx.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); let req = RequestPrepareProposal { @@ -1618,8 +1588,6 @@ mod test_prepare_proposal { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, ); let mut wrapper_tx = Tx::from_type(TxType::Wrapper(Box::new(wrapper))); @@ -1629,7 +1597,10 @@ mod test_prepare_proposal { .set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper_tx.add_section(Section::Signature(Signature::new( wrapper_tx.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); let req = RequestPrepareProposal { @@ -1656,8 +1627,6 @@ mod test_prepare_proposal { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, ); let mut wrapper_tx = Tx::from_type(TxType::Wrapper(Box::new(wrapper))); @@ -1667,7 +1636,10 @@ mod test_prepare_proposal { .set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper_tx.add_section(Section::Signature(Signature::new( wrapper_tx.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, 
crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); let req = RequestPrepareProposal { diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index dca2d4fe4ed..ab544de3f83 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -6,6 +6,7 @@ use namada::core::hints; use namada::core::ledger::storage::WlStorage; use namada::ledger::eth_bridge::{EthBridgeQueries, SendValsetUpd}; use namada::ledger::pos::PosQueries; +use namada::ledger::protocol::get_fee_unshielding_transaction; use namada::ledger::storage::TempWlStorage; use namada::proof_of_stake::find_validator_by_raw_hash; use namada::types::internal::TxInQueue; @@ -889,22 +890,9 @@ where } // Check that the fee payer has sufficient balance. - let fee_unshield = - wrapper.unshield_section_hash.and_then(|ref hash| { - tx.get_section(hash).and_then(|section| { - if let Section::MaspTx(transaction) = - section.as_ref() - { - Some(transaction.to_owned()) - } else { - None - } - }) - }); - match self.wrapper_fee_check( &wrapper, - fee_unshield, + get_fee_unshielding_transaction(&tx, &wrapper), temp_wl_storage, vp_wasm_cache, tx_wasm_cache, @@ -1182,7 +1170,6 @@ mod test_process_proposal { let event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), transfers: vec![], - valid_transfers_map: vec![], }; let ext = ethereum_events::Vext { validator_addr: addr.clone(), @@ -1309,7 +1296,6 @@ mod test_process_proposal { let event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), transfers: vec![], - valid_transfers_map: vec![], }; let ext = ethereum_events::Vext { validator_addr: addr.clone(), @@ -1414,7 +1400,6 @@ mod test_process_proposal { let event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), transfers: vec![], - valid_transfers_map: vec![], }; let ext = { // generate a valid signature @@ -1484,7 +1469,6 @@ mod test_process_proposal { 
let event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), transfers: vec![], - valid_transfers_map: vec![], }; let ext = { #[allow(clippy::redundant_clone)] @@ -1540,7 +1524,6 @@ mod test_process_proposal { let event = EthereumEvent::TransfersToNamada { nonce: 0u64.into(), transfers: vec![], - valid_transfers_map: vec![], }; let ext = { #[allow(clippy::redundant_clone)] @@ -1598,8 +1581,6 @@ mod test_process_proposal { public_key, Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); outer_tx.header.chain_id = shell.chain_id.clone(); @@ -1651,8 +1632,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); outer_tx.header.chain_id = shell.chain_id.clone(); @@ -1660,7 +1639,8 @@ mod test_process_proposal { outer_tx.set_data(Data::new("transaction data".as_bytes().to_owned())); outer_tx.add_section(Section::Signature(Signature::new( outer_tx.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let mut new_tx = outer_tx.clone(); if let TxType::Wrapper(wrapper) = &mut new_tx.header.tx_type { @@ -1724,8 +1704,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); outer_tx.header.chain_id = shell.chain_id.clone(); @@ -1733,7 +1711,8 @@ mod test_process_proposal { outer_tx.set_data(Data::new("transaction data".as_bytes().to_owned())); outer_tx.add_section(Section::Signature(Signature::new( outer_tx.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let response = { @@ -1790,8 +1769,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); outer_tx.header.chain_id = shell.chain_id.clone(); @@ -1799,7 +1776,8 @@ mod test_process_proposal { outer_tx.set_data(Data::new("transaction data".as_bytes().to_owned())); 
outer_tx.add_section(Section::Signature(Signature::new( outer_tx.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let response = { @@ -1846,8 +1824,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); outer_tx.header.chain_id = shell.chain_id.clone(); @@ -1861,10 +1837,7 @@ mod test_process_proposal { .unwrap(); shell.enqueue_tx(outer_tx.clone(), gas_limit); - outer_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - })); + outer_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); txs.push(outer_tx); } #[cfg(feature = "abcipp")] @@ -1929,8 +1902,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); tx.header.chain_id = shell.chain_id.clone(); @@ -1985,8 +1956,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); tx.header.chain_id = shell.chain_id.clone(); @@ -2035,8 +2004,6 @@ mod test_process_proposal { pk: keypair.ref_to(), epoch: Epoch(0), gas_limit: GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - pow_solution: None, unshield_section_hash: None, }; @@ -2071,10 +2038,7 @@ mod test_process_proposal { #[test] fn test_too_many_decrypted_txs() { let (shell, _recv, _, _) = test_utils::setup_at_height(3u64); - let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - })); + let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); tx.header.chain_id = shell.chain_id.clone(); tx.set_code(Code::new("wasm_code".as_bytes().to_owned())); tx.set_data(Data::new("transaction data".as_bytes().to_owned())); @@ -2155,8 +2119,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), 
GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2164,7 +2126,8 @@ mod test_process_proposal { wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); // Write wrapper hash to storage @@ -2232,8 +2195,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2241,7 +2202,8 @@ mod test_process_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); // Run validation @@ -2291,8 +2253,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2300,7 +2260,8 @@ mod test_process_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let inner_unsigned_hash = wrapper.clone().update_header(TxType::Raw).header_hash(); @@ -2355,8 +2316,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2365,7 +2324,8 @@ mod test_process_proposal { let mut new_wrapper = wrapper.clone(); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let inner_unsigned_hash = wrapper.clone().update_header(TxType::Raw).header_hash(); 
@@ -2378,13 +2338,12 @@ mod test_process_proposal { keypair_2.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); new_wrapper.add_section(Section::Signature(Signature::new( new_wrapper.sechashes(), - &keypair_2, + [(0, keypair_2)].into_iter().collect(), + None, ))); // Run validation @@ -2427,8 +2386,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); let wrong_chain_id = ChainId("Wrong chain id".to_string()); @@ -2437,7 +2394,8 @@ mod test_process_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let protocol_key = shell.mode.get_protocol_key().expect("Test failed"); @@ -2491,8 +2449,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = wrong_chain_id.clone(); @@ -2501,12 +2457,11 @@ mod test_process_proposal { .set_data(Data::new("new transaction data".as_bytes().to_owned())); let mut decrypted = wrapper.clone(); - decrypted.update_header(TxType::Decrypted(DecryptedTx::Decrypted { - has_valid_pow: false, - })); + decrypted.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); decrypted.add_section(Section::Signature(Signature::new( decrypted.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let gas_limit = Gas::from(wrapper.header.wrapper().unwrap().gas_limit) .checked_sub(Gas::from(wrapper.to_bytes().len() as u64)) @@ -2514,7 +2469,6 @@ mod test_process_proposal { let wrapper_in_queue = TxInQueue { tx: wrapper, gas: gas_limit, - has_valid_pow: false, }; shell.wl_storage.storage.tx_queue.push(wrapper_in_queue); @@ -2557,8 +2511,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), 
GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2567,7 +2519,8 @@ mod test_process_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); // Run validation @@ -2601,8 +2554,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2612,12 +2563,11 @@ mod test_process_proposal { .set_data(Data::new("new transaction data".as_bytes().to_owned())); let mut decrypted = wrapper.clone(); - decrypted.update_header(TxType::Decrypted(DecryptedTx::Decrypted { - has_valid_pow: false, - })); + decrypted.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); decrypted.add_section(Section::Signature(Signature::new( decrypted.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let gas_limit = Gas::from(wrapper.header.wrapper().unwrap().gas_limit) .checked_sub(Gas::from(wrapper.to_bytes().len() as u64)) @@ -2625,7 +2575,6 @@ mod test_process_proposal { let wrapper_in_queue = TxInQueue { tx: wrapper, gas: gas_limit, - has_valid_pow: false, }; shell.wl_storage.storage.tx_queue.push(wrapper_in_queue); @@ -2665,8 +2614,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), (block_gas_limit + 1).into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2674,7 +2621,8 @@ mod test_process_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); // Run validation @@ -2708,8 +2656,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), 
0.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2717,7 +2663,8 @@ mod test_process_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); // Run validation @@ -2750,8 +2697,6 @@ mod test_process_proposal { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2759,7 +2704,10 @@ mod test_process_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); // Run validation @@ -2792,8 +2740,6 @@ mod test_process_proposal { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2801,7 +2747,10 @@ mod test_process_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); // Run validation @@ -2834,8 +2783,6 @@ mod test_process_proposal { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), 150_000.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2843,7 +2790,10 @@ mod test_process_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); 
wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); // Run validation @@ -2876,8 +2826,6 @@ mod test_process_proposal { crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2885,7 +2833,10 @@ mod test_process_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &crate::wallet::defaults::albert_keypair(), + [(0, crate::wallet::defaults::albert_keypair())] + .into_iter() + .collect(), + None, ))); // Run validation @@ -2921,8 +2872,6 @@ mod test_process_proposal { keypair.ref_to(), Epoch(0), GAS_LIMIT_MULTIPLIER.into(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -2930,7 +2879,8 @@ mod test_process_proposal { wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.add_section(Section::Signature(Signature::new( wrapper.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); let wrapper = wrapper.to_bytes(); for height in [1u64, 2] { diff --git a/apps/src/lib/node/ledger/shell/testing/client.rs b/apps/src/lib/node/ledger/shell/testing/client.rs index c07735d0b9b..9ebc825f545 100644 --- a/apps/src/lib/node/ledger/shell/testing/client.rs +++ b/apps/src/lib/node/ledger/shell/testing/client.rs @@ -3,13 +3,14 @@ use std::ops::ControlFlow; use clap::Command as App; use eyre::Report; use namada::types::control_flow::Halt; +use namada::types::io::Io; use tendermint_config::net::Address as TendermintAddress; use super::node::MockNode; use crate::cli::api::{CliApi, CliClient}; use crate::cli::args::Global; use crate::cli::{args, cmds, Cmd, Context, NamadaClient, 
NamadaRelayer}; -use crate::node::ledger::shell::testing::utils::Bin; +use crate::node::ledger::shell::testing::utils::{Bin, TestingIo}; pub fn run( node: &MockNode, @@ -24,7 +25,7 @@ pub fn run( wasm_dir: Some(locked.wasm_dir.clone()), } }; - let ctx = Context::new(global.clone())?; + let ctx = Context::new::(global.clone())?; let rt = tokio::runtime::Runtime::new().unwrap(); match who { @@ -46,7 +47,10 @@ pub fn run( NamadaClient::WithoutContext(sub_cmd, global) } }; - rt.block_on(CliApi::<()>::handle_client_command(Some(node), cmd)) + rt.block_on(CliApi::::handle_client_command( + Some(node), + cmd, + )) } Bin::Wallet => { args.insert(0, "wallet"); @@ -56,7 +60,7 @@ pub fn run( let cmd = cmds::NamadaWallet::parse(&matches) .expect("Could not parse wallet command"); - CliApi::<()>::handle_wallet_command(cmd, ctx) + CliApi::::handle_wallet_command(cmd, ctx) } Bin::Relayer => { args.insert(0, "relayer"); @@ -78,7 +82,10 @@ pub fn run( NamadaRelayer::ValidatorSet(sub_cmd) } }; - rt.block_on(CliApi::<()>::handle_relayer_command(Some(node), cmd)) + rt.block_on(CliApi::::handle_relayer_command( + Some(node), + cmd, + )) } } } @@ -89,7 +96,7 @@ impl<'a> CliClient for &'a MockNode { unreachable!("MockNode should always be instantiated at test start.") } - async fn wait_until_node_is_synced(&self) -> Halt<()> { + async fn wait_until_node_is_synced(&self) -> Halt<()> { ControlFlow::Continue(()) } } diff --git a/apps/src/lib/node/ledger/shell/testing/node.rs b/apps/src/lib/node/ledger/shell/testing/node.rs index cebb970f623..3f9119e20a9 100644 --- a/apps/src/lib/node/ledger/shell/testing/node.rs +++ b/apps/src/lib/node/ledger/shell/testing/node.rs @@ -8,7 +8,7 @@ use data_encoding::HEXUPPER; use lazy_static::lazy_static; use namada::ledger::events::log::dumb_queries; use namada::ledger::queries::{ - Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, + EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; use namada::ledger::storage::{ LastBlock, 
Sha256Hasher, EPOCH_SWITCH_BLOCKS_DELAY, @@ -19,6 +19,7 @@ use namada::proof_of_stake::{ read_consensus_validator_set_addresses_with_stake, validator_consensus_key_handle, }; +use namada::sdk::queries::Client; use namada::tendermint_proto::abci::VoteInfo; use namada::tendermint_rpc::endpoint::abci_info; use namada::tendermint_rpc::SimpleRequest; @@ -225,6 +226,7 @@ impl MockNode { }; let mut locked = self.shell.lock().unwrap(); let mut result = locked.process_proposal(req); + let mut errors: Vec<_> = result .tx_results .iter() @@ -395,7 +397,7 @@ impl<'a> Client for &'a MockNode { tx: namada::tendermint::abci::Transaction, ) -> Result { - let resp = tendermint_rpc::endpoint::broadcast::tx_sync::Response { + let mut resp = tendermint_rpc::endpoint::broadcast::tx_sync::Response { code: Default::default(), data: Default::default(), log: Default::default(), @@ -404,6 +406,7 @@ impl<'a> Client for &'a MockNode { let tx_bytes: Vec = tx.into(); self.submit_tx(tx_bytes); if !self.success() { + resp.code = tendermint::abci::Code::Err(1337); // TODO: submit_tx should return the correct error code + message return Ok(resp); } else { self.clear_results(); diff --git a/apps/src/lib/node/ledger/shell/testing/utils.rs b/apps/src/lib/node/ledger/shell/testing/utils.rs index e66ead21e7d..bfcb7f50ab9 100644 --- a/apps/src/lib/node/ledger/shell/testing/utils.rs +++ b/apps/src/lib/node/ledger/shell/testing/utils.rs @@ -1,6 +1,12 @@ +use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; +use std::pin::Pin; +use std::task::{Context, Poll}; +use lazy_static::lazy_static; +use namada::types::io::{prompt_aux, read_aux, Io}; use tempfile::tempdir; +use tokio::io::{AsyncRead, ReadBuf}; /// Namada binaries #[derive(Debug)] @@ -46,3 +52,221 @@ impl Default for TestDir { Self::new() } } + +/// The max number of bytes that the is currently remembered from stdout while +/// testing. +const TESTOUT_BUF_SIZE: usize = 100_000; + +lazy_static! { + /// A replacement for stdout in testing. 
The maximum number of bytes + /// it holds is limited to prevent memory issues. + pub static ref TESTOUT: std::sync::Arc>> = + std::sync::Arc::new(std::sync::Mutex::new(FixedBuffer::new(TESTOUT_BUF_SIZE))); +} + +lazy_static! { + /// A replacement for stdin in testing. + pub static ref TESTIN: AtomicBuffer = + AtomicBuffer(std::sync::Arc::new(std::sync::Mutex::new(vec![]))); +} + +pub struct TestingIo; + +#[async_trait::async_trait(?Send)] +impl Io for TestingIo { + fn print(output: impl AsRef) { + let mut testout = TESTOUT.lock().unwrap(); + testout.append(output.as_ref().as_bytes().to_vec()); + print!("{}", output.as_ref()); + } + + fn println(output: impl AsRef) { + let mut testout = TESTOUT.lock().unwrap(); + let mut bytes = output.as_ref().as_bytes().to_vec(); + bytes.extend_from_slice("\n".as_bytes()); + testout.append(bytes); + println!("{}", output.as_ref()); + } + + fn write( + _: W, + output: impl AsRef, + ) -> std::io::Result<()> { + Self::print(output); + Ok(()) + } + + fn writeln( + _: W, + output: impl AsRef, + ) -> std::io::Result<()> { + Self::println(output); + Ok(()) + } + + fn eprintln(output: impl AsRef) { + let mut testout = TESTOUT.lock().unwrap(); + let mut bytes = output.as_ref().as_bytes().to_vec(); + bytes.extend_from_slice("\n".as_bytes()); + testout.append(bytes); + eprintln!("{}", output.as_ref()); + } + + async fn read() -> tokio::io::Result { + read_aux(&*TESTIN).await + } + + async fn prompt(question: impl AsRef) -> String { + prompt_aux(&*TESTIN, tokio::io::stdout(), question.as_ref()).await + } +} + +/// Test helper that captures stdout of +/// a process. +pub struct CapturedOutput { + pub output: String, + pub result: T, + input: String, +} + +impl CapturedOutput { + pub fn with_input(input: String) -> Self { + Self { + output: "".to_string(), + result: (), + input, + } + } +} + +impl CapturedOutput { + /// Run a client command and capture + /// the output to the mocked stdout. 
+ pub fn of(func: F) -> Self + where + F: FnOnce() -> T, + { + let mut capture = Self { + output: Default::default(), + result: func(), + input: Default::default(), + }; + capture.output = TESTOUT.lock().unwrap().read_string(); + capture + } + + /// Run a client command with input to the mocked stdin and capture + /// the output to the mocked stdout + pub fn run(&self, func: F) -> CapturedOutput + where + F: FnOnce() -> U, + { + { + // write the input to the mocked stdin + let mut buf = TESTIN.lock().unwrap(); + buf.clear(); + buf.extend_from_slice(self.input.as_bytes()); + } + CapturedOutput::of(func) + } + + /// Check if the captured output contains the regex. + pub fn matches(&self, needle: regex::Regex) -> bool { + needle.captures(&self.output).is_some() + } + + /// Check if the captured output contains the string. + pub fn contains(&self, needle: &str) -> bool { + let needle = regex::Regex::new(needle).unwrap(); + self.matches(needle) + } +} + +/// A buffer with a max size. Drops elements from the front on +/// size overflow. +pub struct FixedBuffer { + inner: Vec, + max_size: usize, +} + +impl FixedBuffer { + fn new(max_size: usize) -> Self { + Self { + inner: vec![], + max_size, + } + } + + /// Remove the first `size` elements from the buffer. + fn roll(&mut self, size: usize) { + self.inner = self.inner[size..].to_vec(); + } + + /// Add data to the end of the buffer, deleting from the + /// front as necessary. + fn append(&mut self, mut other: Vec) { + // if new data exceeds max size, take the tail. + if other.len() > self.max_size { + self.inner = other[(other.len() - self.max_size)..].to_vec(); + return; + } + // check if appending the data overflows buffer + let free_space = self.max_size - self.inner.len(); + if other.len() > free_space { + // delete the minimum amount of data from the front of the buffer + // to fit new data. 
+ self.roll(other.len() - free_space); + } + self.inner.append(&mut other); + } +} + +impl FixedBuffer { + /// Read the inner buffer out to string + pub fn read_string(&mut self) -> String { + let mut fresh = vec![]; + std::mem::swap(&mut fresh, &mut self.inner); + String::from_utf8(fresh).unwrap() + } +} + +pub struct AtomicBuffer(std::sync::Arc>>); + +impl Deref for AtomicBuffer { + type Target = std::sync::Arc>>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'a> AsyncRead for &'a AtomicBuffer { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + let mut inner = self.lock().unwrap(); + let buf_before = buf.filled().len(); + let res = + AsyncRead::poll_read(Pin::new(&mut inner.as_slice()), cx, buf); + let amount_read = buf.filled().len() - buf_before; + *inner.deref_mut() = inner[amount_read..].to_vec(); + res + } +} + +#[cfg(test)] +mod testing { + use super::*; + + #[test] + fn test_buffer() { + let mut buffer = FixedBuffer::::new(10); + buffer.inner = (1u64..=9_u64).collect(); + buffer.append(vec![10, 11, 12, 13, 14, 15]); + assert_eq!(buffer.inner, (6u64..=15_u64).collect::>()); + buffer.append((20u64..=40_u64).collect()); + assert_eq!(buffer.inner, (31u64..=40_u64).collect::>()); + } +} diff --git a/apps/src/lib/node/ledger/shell/vote_extensions.rs b/apps/src/lib/node/ledger/shell/vote_extensions.rs index 5f2828442b4..658c35a121d 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions.rs @@ -104,8 +104,19 @@ where } /// Extend PreCommit votes with [`ethereum_events::Vext`] instances. + #[inline] pub fn extend_vote_with_ethereum_events( &mut self, + ) -> Option> { + let events = self.new_ethereum_events(); + self.sign_ethereum_events(events) + } + + /// Sign the given Ethereum events, and return the associated + /// vote extension protocol transaction. 
+ pub fn sign_ethereum_events( + &mut self, + ethereum_events: Vec, ) -> Option> { if !self.wl_storage.ethbridge_queries().is_bridge_active() { return None; @@ -124,7 +135,7 @@ where .get_current_decision_height(), #[cfg(not(feature = "abcipp"))] block_height: self.wl_storage.storage.get_last_block_height(), - ethereum_events: self.new_ethereum_events(), + ethereum_events, validator_addr, }; if !ext.ethereum_events.is_empty() { diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs index 1ee2331f23b..0dd85bfd700 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs @@ -192,24 +192,11 @@ where /// thus the inclusion of its container Ethereum events vote /// extension. /// - /// Additionally, the length of the transfers array and their - /// respective validity map must match, for the event to be - /// considered valid. - /// /// ## Transfers to Namada /// /// For a transfers to Namada event to be considered valid, /// the nonce of this kind of event must not be lower than /// the one stored in Namada. - /// - /// In this case, the length of the transfers array and their - /// respective validity map must also match. - /// - /// ## Whitelist updates - /// - /// For any of these events to be considered valid, the - /// whitelist update nonce in storage must be greater - /// than or equal to the nonce in the event. fn validate_eth_event( &self, event: &EthereumEvent, @@ -222,20 +209,8 @@ where // out such events, which will time out in storage match event { EthereumEvent::TransfersToEthereum { - nonce: ext_nonce, - transfers, - valid_transfers_map, - .. + nonce: ext_nonce, .. 
} => { - if transfers.len() != valid_transfers_map.len() { - tracing::debug!( - transfers_len = transfers.len(), - valid_transfers_map_len = valid_transfers_map.len(), - "{}", - VoteExtensionError::TransfersLenMismatch - ); - return Err(VoteExtensionError::TransfersLenMismatch); - } let current_bp_nonce = self.wl_storage.ethbridge_queries().get_bridge_pool_nonce(); if ¤t_bp_nonce != ext_nonce { @@ -249,20 +224,8 @@ where } } EthereumEvent::TransfersToNamada { - nonce: ext_nonce, - transfers, - valid_transfers_map, - .. + nonce: ext_nonce, .. } => { - if transfers.len() != valid_transfers_map.len() { - tracing::debug!( - transfers_len = transfers.len(), - valid_transfers_map_len = valid_transfers_map.len(), - "{}", - VoteExtensionError::TransfersLenMismatch - ); - return Err(VoteExtensionError::TransfersLenMismatch); - } let next_nam_transfers_nonce = self .wl_storage .ethbridge_queries() @@ -514,7 +477,6 @@ mod test_vote_extensions { .validate_eth_event(&EthereumEvent::TransfersToEthereum { nonce, transfers: vec![], - valid_transfers_map: vec![], relayer: gen_established_address(), }) .expect("Test failed"); @@ -524,7 +486,6 @@ mod test_vote_extensions { .validate_eth_event(&EthereumEvent::TransfersToEthereum { nonce: nonce + 1, transfers: vec![], - valid_transfers_map: vec![], relayer: gen_established_address(), }) .expect_err("Test failed"); @@ -532,7 +493,6 @@ mod test_vote_extensions { .validate_eth_event(&EthereumEvent::TransfersToEthereum { nonce: nonce - 1, transfers: vec![], - valid_transfers_map: vec![], relayer: gen_established_address(), }) .expect_err("Test failed"); @@ -542,14 +502,12 @@ mod test_vote_extensions { .validate_eth_event(&EthereumEvent::TransfersToNamada { nonce, transfers: vec![], - valid_transfers_map: vec![], }) .expect("Test failed"); shell .validate_eth_event(&EthereumEvent::TransfersToNamada { nonce: nonce + 5, transfers: vec![], - valid_transfers_map: vec![], }) .expect("Test failed"); @@ -558,32 +516,12 @@ mod test_vote_extensions 
{ .validate_eth_event(&EthereumEvent::TransfersToNamada { nonce: nonce - 1, transfers: vec![], - valid_transfers_map: vec![], }) .expect_err("Test failed"); shell .validate_eth_event(&EthereumEvent::TransfersToNamada { nonce: nonce - 2, transfers: vec![], - valid_transfers_map: vec![], - }) - .expect_err("Test failed"); - - // either kind of transfer with different validity map and transfer - // array length are invalid - shell - .validate_eth_event(&EthereumEvent::TransfersToEthereum { - nonce, - transfers: vec![], - valid_transfers_map: vec![true, true], - relayer: gen_established_address(), - }) - .expect_err("Test failed"); - shell - .validate_eth_event(&EthereumEvent::TransfersToNamada { - nonce, - transfers: vec![], - valid_transfers_map: vec![true, true], }) .expect_err("Test failed"); } @@ -605,7 +543,6 @@ mod test_vote_extensions { receiver: EthAddress([2; 20]), checksum: Hash::default(), }], - valid_transfers_map: vec![true], relayer: gen_established_address(), }; let event_2 = EthereumEvent::TransfersToEthereum { @@ -616,34 +553,41 @@ mod test_vote_extensions { receiver: EthAddress([2; 20]), checksum: Hash::default(), }], - valid_transfers_map: vec![true], relayer: gen_established_address(), }; - let event_3 = EthereumEvent::NewContract { - name: "Test".to_string(), - address: EthAddress([0; 20]), + let event_3 = EthereumEvent::TransfersToNamada { + nonce: 0.into(), + transfers: vec![], }; tokio_test::block_on(oracle.send(event_1.clone())) .expect("Test failed"); tokio_test::block_on(oracle.send(event_3.clone())) .expect("Test failed"); - let [event_first, event_second]: [EthereumEvent; 2] = + + let got_events: [EthereumEvent; 2] = shell.new_ethereum_events().try_into().expect("Test failed"); + let expected_events: Vec<_> = std::collections::BTreeSet::from([ + event_1.clone(), + event_3.clone(), + ]) + .into_iter() + .collect(); + assert_eq!(expected_events, got_events); - assert_eq!(event_first, event_1); - assert_eq!(event_second, event_3); // check 
that we queue and de-duplicate events tokio_test::block_on(oracle.send(event_2.clone())) .expect("Test failed"); tokio_test::block_on(oracle.send(event_3.clone())) .expect("Test failed"); - let [event_first, event_second, event_third]: [EthereumEvent; 3] = - shell.new_ethereum_events().try_into().expect("Test failed"); - assert_eq!(event_first, event_1); - assert_eq!(event_second, event_2); - assert_eq!(event_third, event_3); + let got_events: [EthereumEvent; 3] = + shell.new_ethereum_events().try_into().expect("Test failed"); + let expected_events: Vec<_> = + std::collections::BTreeSet::from([event_1, event_2, event_3]) + .into_iter() + .collect(); + assert_eq!(expected_events, got_events); } /// Test that ethereum events are added to vote extensions. @@ -665,7 +609,6 @@ mod test_vote_extensions { receiver: EthAddress([2; 20]), checksum: Hash::default(), }], - valid_transfers_map: vec![true], relayer: gen_established_address(), }; let event_2 = EthereumEvent::NewContract { @@ -725,7 +668,6 @@ mod test_vote_extensions { receiver: EthAddress([2; 20]), checksum: Hash::default(), }], - valid_transfers_map: vec![true], relayer: gen_established_address(), }], block_height: shell @@ -817,7 +759,6 @@ mod test_vote_extensions { receiver: EthAddress([2; 20]), checksum: Hash::default(), }], - valid_transfers_map: vec![true], relayer: gen_established_address(), }], block_height: signed_height, @@ -922,7 +863,6 @@ mod test_vote_extensions { receiver: EthAddress([2; 20]), checksum: Hash::default(), }], - valid_transfers_map: vec![true], relayer: gen_established_address(), }], block_height: shell.wl_storage.storage.get_last_block_height(), @@ -1002,7 +942,6 @@ mod test_vote_extensions { receiver: EthAddress([2; 20]), checksum: Hash::default(), }], - valid_transfers_map: vec![true], relayer: gen_established_address(), }], block_height: shell.wl_storage.storage.get_last_block_height(), diff --git a/apps/src/lib/wallet/cli_utils.rs b/apps/src/lib/wallet/cli_utils.rs index 
ffaf0808413..72bb0acaab2 100644 --- a/apps/src/lib/wallet/cli_utils.rs +++ b/apps/src/lib/wallet/cli_utils.rs @@ -4,8 +4,8 @@ use std::io::{self, Write}; use borsh::BorshSerialize; use itertools::sorted; use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::ledger::masp::find_valid_diversifier; -use namada::ledger::wallet::{DecryptionError, FindKeyError}; +use namada::sdk::masp::find_valid_diversifier; +use namada::sdk::wallet::{DecryptionError, FindKeyError}; use namada::types::key::{PublicKeyHash, RefTo}; use namada::types::masp::{MaspValue, PaymentAddress}; use rand_core::OsRng; diff --git a/apps/src/lib/wallet/defaults.rs b/apps/src/lib/wallet/defaults.rs index 88d9589e69e..00b0f49d269 100644 --- a/apps/src/lib/wallet/defaults.rs +++ b/apps/src/lib/wallet/defaults.rs @@ -8,8 +8,8 @@ pub use dev::{ validator_keys, }; use namada::core::ledger::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; -use namada::ledger::wallet::alias::Alias; use namada::ledger::{eth_bridge, governance, pgf, pos}; +use namada::sdk::wallet::alias::Alias; use namada::types::address::Address; use namada::types::key::*; @@ -77,8 +77,8 @@ mod dev { use std::collections::HashMap; use borsh::BorshDeserialize; - use namada::ledger::wallet::alias::Alias; use namada::ledger::{governance, pgf, pos}; + use namada::sdk::wallet::alias::Alias; use namada::types::address::{ apfel, btc, dot, eth, kartoffel, nam, schnitzel, Address, }; diff --git a/apps/src/lib/wallet/mod.rs b/apps/src/lib/wallet/mod.rs index 7c9c193087e..f6611ebe180 100644 --- a/apps/src/lib/wallet/mod.rs +++ b/apps/src/lib/wallet/mod.rs @@ -9,12 +9,12 @@ use std::str::FromStr; use std::{env, fs}; use namada::bip39::{Language, Mnemonic}; -pub use namada::ledger::wallet::alias::Alias; -use namada::ledger::wallet::{ +pub use namada::sdk::wallet::alias::Alias; +use namada::sdk::wallet::{ AddressVpType, ConfirmationResponse, FindKeyError, GenRestoreKeyError, Wallet, WalletUtils, }; -pub use 
namada::ledger::wallet::{ValidatorData, ValidatorKeys}; +pub use namada::sdk::wallet::{ValidatorData, ValidatorKeys}; use namada::types::address::Address; use namada::types::key::*; use rand_core::OsRng; @@ -309,7 +309,7 @@ pub fn read_and_confirm_encryption_password( #[cfg(test)] mod tests { use namada::bip39::MnemonicType; - use namada::ledger::wallet::WalletUtils; + use namada::sdk::wallet::WalletUtils; use rand_core; use super::CliWalletUtils; diff --git a/apps/src/lib/wallet/pre_genesis.rs b/apps/src/lib/wallet/pre_genesis.rs index 3b05bb214fe..21a80267f1d 100644 --- a/apps/src/lib/wallet/pre_genesis.rs +++ b/apps/src/lib/wallet/pre_genesis.rs @@ -3,10 +3,10 @@ use std::path::{Path, PathBuf}; use ark_serialize::{Read, Write}; use fd_lock::RwLock; -use namada::ledger::wallet::pre_genesis::{ +use namada::sdk::wallet::pre_genesis::{ ReadError, ValidatorStore, ValidatorWallet, }; -use namada::ledger::wallet::{gen_key_to_store, WalletUtils}; +use namada::sdk::wallet::{gen_key_to_store, WalletUtils}; use namada::types::key::SchemeType; use zeroize::Zeroizing; diff --git a/apps/src/lib/wallet/store.rs b/apps/src/lib/wallet/store.rs index 6ae0d023d94..0f2aa86b7be 100644 --- a/apps/src/lib/wallet/store.rs +++ b/apps/src/lib/wallet/store.rs @@ -9,10 +9,10 @@ use ark_std::rand::prelude::*; use ark_std::rand::SeedableRng; use fd_lock::RwLock; #[cfg(not(feature = "dev"))] -use namada::ledger::wallet::store::AddressVpType; +use namada::sdk::wallet::store::AddressVpType; #[cfg(feature = "dev")] -use namada::ledger::wallet::StoredKeypair; -use namada::ledger::wallet::{gen_sk_rng, Store, ValidatorKeys}; +use namada::sdk::wallet::StoredKeypair; +use namada::sdk::wallet::{gen_sk_rng, Store, ValidatorKeys}; #[cfg(not(feature = "dev"))] use namada::types::address::Address; use namada::types::key::*; diff --git a/benches/host_env.rs b/benches/host_env.rs index 6a3cc6e37b8..6f385b93bc7 100644 --- a/benches/host_env.rs +++ b/benches/host_env.rs @@ -1,9 +1,11 @@ +use 
std::collections::HashSet; + use borsh::BorshSerialize; use criterion::{criterion_group, criterion_main, Criterion}; use namada::core::types::account::AccountPublicKeysMap; use namada::core::types::address; use namada::core::types::token::{Amount, Transfer}; -use namada::proto::{Data, MultiSignature, Section}; +use namada::proto::{Data, Section, Signature}; use namada_apps::wallet::defaults; /// Benchmarks the validation of a single signature on a single `Section` of a @@ -24,17 +26,16 @@ fn tx_section_signature_validation(c: &mut Criterion) { defaults::albert_keypair().to_public() ]); - let multisig = MultiSignature::new( + let multisig = Signature::new( vec![section_hash], - &[defaults::albert_keypair()], - &pkim, + pkim.index_secret_keys(vec![defaults::albert_keypair()]), + None, ); - let signature_index = multisig.signatures.first().unwrap().clone(); c.bench_function("tx_section_signature_validation", |b| { b.iter(|| { - signature_index - .verify(&pkim, &multisig.get_raw_hash()) + multisig + .verify_signature(&mut HashSet::new(), &pkim, &None) .unwrap() }) }); diff --git a/benches/lib.rs b/benches/lib.rs index 11042c84b3a..a723048e876 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -62,22 +62,23 @@ use namada::ibc::core::Msg; use namada::ibc::Height as IbcHeight; use namada::ibc_proto::google::protobuf::Any; use namada::ibc_proto::protobuf::Protobuf; -use namada::ledger::args::InputAmount; use namada::ledger::gas::TxGasMeter; use namada::ledger::ibc::storage::{channel_key, connection_key}; -use namada::ledger::masp::{ - self, ShieldedContext, ShieldedTransfer, ShieldedUtils, -}; use namada::ledger::queries::{ Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; -use namada::ledger::wallet::Wallet; use namada::proof_of_stake; use namada::proto::{Code, Data, Section, Signature, Tx}; +use namada::sdk::args::InputAmount; +use namada::sdk::masp::{ + self, ShieldedContext, ShieldedTransfer, ShieldedUtils, +}; +use namada::sdk::wallet::Wallet; 
use namada::tendermint::Hash; use namada::tendermint_rpc::{self}; use namada::types::address::InternalAddress; use namada::types::chain::ChainId; +use namada::types::io::DefaultIo; use namada::types::masp::{ ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, }; @@ -406,10 +407,7 @@ pub fn generate_tx( signer: Option<&SecretKey>, ) -> Tx { let mut tx = Tx::from_type(namada::types::transaction::TxType::Decrypted( - namada::types::transaction::DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: true, - }, + namada::types::transaction::DecryptedTx::Decrypted, )); // NOTE: don't use the hash to avoid computing the cost of loading the wasm @@ -435,7 +433,8 @@ pub fn generate_tx( if let Some(signer) = signer { tx.add_section(Section::Signature(Signature::new( tx.sechashes(), - signer, + [(0, signer.clone())].into_iter().collect(), + None, ))); } @@ -445,10 +444,7 @@ pub fn generate_tx( pub fn generate_ibc_tx(wasm_code_path: &str, msg: impl Msg) -> Tx { // This function avoid serializaing the tx data with Borsh let mut tx = Tx::from_type(namada::types::transaction::TxType::Decrypted( - namada::types::transaction::DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: true, - }, + namada::types::transaction::DecryptedTx::Decrypted, )); tx.set_code(Code::new(wasm_loader::read_wasm_or_exit( WASM_DIR, @@ -467,10 +463,7 @@ pub fn generate_foreign_key_tx(signer: &SecretKey) -> Tx { let wasm_code = std::fs::read("../wasm_for_tests/tx_write.wasm").unwrap(); let mut tx = Tx::from_type(namada::types::transaction::TxType::Decrypted( - namada::types::transaction::DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: true, - }, + namada::types::transaction::DecryptedTx::Decrypted, )); tx.set_code(Code::new(wasm_code)); tx.set_data(Data::new( @@ -481,7 +474,11 @@ pub fn generate_foreign_key_tx(signer: &SecretKey) -> Tx { .try_to_vec() .unwrap(), )); - 
tx.add_section(Section::Signature(Signature::new(tx.sechashes(), signer))); + tx.add_section(Section::Signature(Signature::new( + tx.sechashes(), + [(0, signer.clone())].into_iter().collect(), + None, + ))); tx } @@ -662,12 +659,13 @@ impl Default for BenchShieldedCtx { fn default() -> Self { let mut shell = BenchShell::default(); - let mut ctx = Context::new(namada_apps::cli::args::Global { - chain_id: None, - base_dir: shell.tempdir.as_ref().canonicalize().unwrap(), - wasm_dir: Some(WASM_DIR.into()), - }) - .unwrap(); + let mut ctx = + Context::new::(namada_apps::cli::args::Global { + chain_id: None, + base_dir: shell.tempdir.as_ref().canonicalize().unwrap(), + wasm_dir: Some(WASM_DIR.into()), + }) + .unwrap(); // Generate spending key for Albert and Bertha ctx.wallet.gen_spending_key( @@ -700,7 +698,7 @@ impl Default for BenchShieldedCtx { .fvk .vk; let (div, _g_d) = - namada::ledger::masp::find_valid_diversifier(&mut OsRng); + namada::sdk::masp::find_valid_diversifier(&mut OsRng); let payment_addr = viewing_key.to_payment_address(div).unwrap(); let _ = ctx .wallet @@ -784,7 +782,10 @@ impl BenchShieldedCtx { )) .unwrap(); let shielded = async_runtime - .block_on(self.shielded.gen_shielded_transfer(&self.shell, args)) + .block_on( + self.shielded + .gen_shielded_transfer::<_, DefaultIo>(&self.shell, args), + ) .unwrap() .map( |ShieldedTransfer { diff --git a/benches/process_wrapper.rs b/benches/process_wrapper.rs index 72b26e0cb18..fc09d6f65e3 100644 --- a/benches/process_wrapper.rs +++ b/benches/process_wrapper.rs @@ -42,14 +42,13 @@ fn process_tx(c: &mut Criterion) { defaults::albert_keypair().ref_to(), 0.into(), 1000.into(), - #[cfg(not(feature = "mainnet"))] - None, None, ), ))); tx.add_section(namada::proto::Section::Signature(Signature::new( tx.sechashes(), - &defaults::albert_keypair(), + [(0, defaults::albert_keypair())].into_iter().collect(), + None, ))); let wrapper = tx.to_bytes(); diff --git a/benches/vps.rs b/benches/vps.rs index 
20e6055885f..c7fdfafa0e0 100644 --- a/benches/vps.rs +++ b/benches/vps.rs @@ -149,7 +149,7 @@ fn vp_user(c: &mut Criterion) { b.iter(|| { assert!( run::vp( - &vp_code_hash, + vp_code_hash, signed_tx, &TxIndex(0), &defaults::albert_address(), @@ -161,8 +161,6 @@ fn vp_user(c: &mut Criterion) { &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), - #[cfg(not(feature = "mainnet"))] - false, ) .unwrap() ); @@ -296,7 +294,7 @@ fn vp_implicit(c: &mut Criterion) { b.iter(|| { assert!( run::vp( - &vp_code_hash, + vp_code_hash, tx, &TxIndex(0), &Address::from(&implicit_account.to_public()), @@ -308,8 +306,6 @@ fn vp_implicit(c: &mut Criterion) { &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), - #[cfg(not(feature = "mainnet"))] - false, ) .unwrap() ) @@ -447,7 +443,7 @@ fn vp_validator(c: &mut Criterion) { b.iter(|| { assert!( run::vp( - &vp_code_hash, + vp_code_hash, signed_tx, &TxIndex(0), &defaults::validator_address(), @@ -459,8 +455,6 @@ fn vp_validator(c: &mut Criterion) { &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), - #[cfg(not(feature = "mainnet"))] - false, ) .unwrap() ); @@ -538,7 +532,7 @@ fn vp_masp(c: &mut Criterion) { b.iter(|| { assert!( run::vp( - &vp_code_hash, + vp_code_hash, &signed_tx, &TxIndex(0), &defaults::validator_address(), @@ -550,8 +544,6 @@ fn vp_masp(c: &mut Criterion) { &keys_changed, &verifiers, shielded_ctx.shell.vp_wasm_cache.clone(), - #[cfg(not(feature = "mainnet"))] - false, ) .unwrap() ); diff --git a/core/src/ledger/eth_bridge/storage/bridge_pool.rs b/core/src/ledger/eth_bridge/storage/bridge_pool.rs index 167e23e7797..78e15b5d388 100644 --- a/core/src/ledger/eth_bridge/storage/bridge_pool.rs +++ b/core/src/ledger/eth_bridge/storage/bridge_pool.rs @@ -15,6 +15,14 @@ use crate::types::hash::Hash; use crate::types::keccak::{keccak_hash, KeccakHash}; use crate::types::storage::{BlockHeight, DbKeySeg, Key, KeySeg}; +/// Prefix to be used in Bridge pool tree root computations. +/// This value corresponds to leaf nodes. 
+const POOL_ROOT_PREFIX_LEAF: u8 = 0x00; + +/// Prefix to be used in Bridge pool tree root computations. +/// This value corresponds to non-leaf nodes. +const POOL_ROOT_PREFIX_NON_LEAF: u8 = 0xff; + /// The main address of the Ethereum bridge pool pub const BRIDGE_POOL_ADDRESS: Address = Address::Internal(InternalAddress::EthBridgePool); @@ -156,14 +164,16 @@ impl BridgePoolTree { /// Compute the root of the merkle tree fn compute_root(&self) -> KeccakHash { let mut hashes: Vec = self.leaves.keys().cloned().collect(); + let mut prefix = POOL_ROOT_PREFIX_LEAF; while hashes.len() > 1 { let mut next_hashes = vec![]; for pair in hashes.chunks(2) { let left = pair[0].clone(); let right = pair.get(1).cloned().unwrap_or_default(); - next_hashes.push(hash_pair(left, right)); + next_hashes.push(hash_pair(left, right, prefix)); } hashes = next_hashes; + prefix = POOL_ROOT_PREFIX_NON_LEAF; } if hashes.is_empty() { @@ -215,6 +225,8 @@ impl BridgePoolTree { }) .collect(); + let mut prefix = POOL_ROOT_PREFIX_LEAF; + while hashes.len() > 1 { let mut next_hashes = vec![]; @@ -224,30 +236,41 @@ impl BridgePoolTree { match (left, right) { (Node::OnPath(left), Node::OnPath(right)) => { flags.push(true); - next_hashes - .push(Node::OnPath(hash_pair(left.clone(), right))); + next_hashes.push(Node::OnPath(hash_pair( + left.clone(), + right, + prefix, + ))); } (Node::OnPath(hash), Node::OffPath(sib)) => { flags.push(false); proof_hashes.push(sib.clone()); - next_hashes - .push(Node::OnPath(hash_pair(hash.clone(), sib))); + next_hashes.push(Node::OnPath(hash_pair( + hash.clone(), + sib, + prefix, + ))); } (Node::OffPath(sib), Node::OnPath(hash)) => { flags.push(false); proof_hashes.push(sib.clone()); - next_hashes - .push(Node::OnPath(hash_pair(hash, sib.clone()))); + next_hashes.push(Node::OnPath(hash_pair( + hash, + sib.clone(), + prefix, + ))); } (Node::OffPath(left), Node::OffPath(right)) => { next_hashes.push(Node::OffPath(hash_pair( left.clone(), right, + prefix, ))); } } } hashes = 
next_hashes; + prefix = POOL_ROOT_PREFIX_NON_LEAF; } // add the root to the proof if flags.is_empty() && proof_hashes.is_empty() && leaves.is_empty() { @@ -285,14 +308,21 @@ impl BridgePoolTree { } } -/// Concatenate two keccak hashes and hash the result +/// Concatenate a byte prefix and two keccak hashes, +/// then compute the keccak hash of the resulting +/// byte array. #[inline] -fn hash_pair(left: KeccakHash, right: KeccakHash) -> KeccakHash { +fn hash_pair(left: KeccakHash, right: KeccakHash, prefix: u8) -> KeccakHash { + let mut buf = [0u8; 32 + 32 + 1]; + buf[0] = prefix; if left.0 < right.0 { - keccak_hash([left.0, right.0].concat().as_slice()) + buf[1..33].copy_from_slice(&left.0); + buf[33..].copy_from_slice(&right.0); } else { - keccak_hash([right.0, left.0].concat().as_slice()) + buf[1..33].copy_from_slice(&right.0); + buf[33..].copy_from_slice(&left.0); } + keccak_hash(buf) } /// Keeps track if a node is on a path from the @@ -348,14 +378,14 @@ impl BridgePoolProof { let mut proof_pos = 0usize; for i in 0..total_hashes { - let left = if leaf_pos < leaf_len { + let (left, prefix) = if leaf_pos < leaf_len { let next = self.leaves[leaf_pos].keccak256(); leaf_pos += 1; - next + (next, POOL_ROOT_PREFIX_LEAF) } else { let next = hashes[hash_pos].clone(); hash_pos += 1; - next + (next, POOL_ROOT_PREFIX_NON_LEAF) }; let right = if self.flags[i] { if leaf_pos < leaf_len { @@ -372,7 +402,7 @@ impl BridgePoolProof { proof_pos += 1; next }; - hashes[i] = hash_pair(left, right); + hashes[i] = hash_pair(left, right, prefix); } if let Some(computed) = hashes.last() { @@ -479,8 +509,11 @@ mod test_bridge_pool_tree { transfers.push(transfer); let _ = tree.insert_key(&key, BlockHeight(1)).expect("Test failed"); } - let expected = - hash_pair(transfers[0].keccak256(), transfers[1].keccak256()); + let expected = hash_pair( + transfers[0].keccak256(), + transfers[1].keccak256(), + POOL_ROOT_PREFIX_LEAF, + ); assert_eq!(tree.root(), expected); } @@ -516,11 +549,18 @@ mod 
test_bridge_pool_tree { tree.leaves.keys().cloned().collect::>() ); - let left_hash = - hash_pair(transfers[0].keccak256(), transfers[1].keccak256()); - let right_hash = - hash_pair(transfers[2].keccak256(), Default::default()); - let expected = hash_pair(left_hash, right_hash); + let left_hash = hash_pair( + transfers[0].keccak256(), + transfers[1].keccak256(), + POOL_ROOT_PREFIX_LEAF, + ); + let right_hash = hash_pair( + transfers[2].keccak256(), + Default::default(), + POOL_ROOT_PREFIX_LEAF, + ); + let expected = + hash_pair(left_hash, right_hash, POOL_ROOT_PREFIX_NON_LEAF); assert_eq!(tree.root(), expected); } @@ -581,8 +621,11 @@ mod test_bridge_pool_tree { let deleted_key = Key::from(&transfers[1]); tree.delete_key(&deleted_key).expect("Test failed"); - let expected = - hash_pair(transfers[0].keccak256(), transfers[2].keccak256()); + let expected = hash_pair( + transfers[0].keccak256(), + transfers[2].keccak256(), + POOL_ROOT_PREFIX_LEAF, + ); assert_eq!(tree.root(), expected); assert_matches!(tree.get(&deleted_key), Err(_)); } diff --git a/core/src/ledger/eth_bridge/storage/mod.rs b/core/src/ledger/eth_bridge/storage/mod.rs index e728603fb7f..5e11db64efe 100644 --- a/core/src/ledger/eth_bridge/storage/mod.rs +++ b/core/src/ledger/eth_bridge/storage/mod.rs @@ -63,11 +63,6 @@ pub fn bridge_contract_key() -> Key { get_bridge_contract_address_key_at_addr(PARAM_ADDRESS) } -/// Storage key for the Ethereum address of the governance contract. 
-pub fn governance_contract_key() -> Key { - get_governance_contract_address_key_at_addr(PARAM_ADDRESS) -} - #[cfg(test)] mod test { use super::*; diff --git a/core/src/ledger/governance/cli/offline.rs b/core/src/ledger/governance/cli/offline.rs index 3f9feeb1dce..fb56a1270a4 100644 --- a/core/src/ledger/governance/cli/offline.rs +++ b/core/src/ledger/governance/cli/offline.rs @@ -315,18 +315,13 @@ fn compute_signatures_index( account_public_keys_map: &AccountPublicKeysMap, hashed_data: &Hash, ) -> BTreeSet { - keys.iter() - .filter_map(|signing_key| { + account_public_keys_map + .index_secret_keys(keys.to_vec()) + .values() + .map(|signing_key| { let public_key = signing_key.ref_to(); - let public_key_index = - account_public_keys_map.get_index_from_public_key(&public_key); - if public_key_index.is_some() { - let signature = - common::SigScheme::sign(signing_key, hashed_data); - Some(SignatureIndex::from_single_signature(signature)) - } else { - None - } + let signature = common::SigScheme::sign(signing_key, hashed_data); + SignatureIndex::from_single_signature(public_key, signature) }) .collect::>() } @@ -338,11 +333,12 @@ fn compute_total_valid_signatures( hashed_data: &Hash, ) -> u8 { signatures.iter().fold(0_u8, |acc, signature_index| { - let public_key = account_public_keys_map - .get_public_key_from_index(signature_index.index); - if let Some(pk) = public_key { + if account_public_keys_map + .get_index_from_public_key(&signature_index.pubkey) + .is_some() + { let sig_check = common::SigScheme::verify_signature( - &pk, + &signature_index.pubkey, hashed_data, &signature_index.signature, ); diff --git a/core/src/ledger/mod.rs b/core/src/ledger/mod.rs index 9a84fbc1264..890d58044d1 100644 --- a/core/src/ledger/mod.rs +++ b/core/src/ledger/mod.rs @@ -10,6 +10,5 @@ pub mod pgf; pub mod replay_protection; pub mod storage; pub mod storage_api; -pub mod testnet_pow; pub mod tx_env; pub mod vp_env; diff --git a/core/src/ledger/parameters/mod.rs 
b/core/src/ledger/parameters/mod.rs index 5be01fde56b..31801cf7c5c 100644 --- a/core/src/ledger/parameters/mod.rs +++ b/core/src/ledger/parameters/mod.rs @@ -61,9 +61,6 @@ pub struct Parameters { pub staked_ratio: Dec, /// PoS inflation amount from the last epoch (read + write for every epoch) pub pos_inflation_amount: token::Amount, - #[cfg(not(feature = "mainnet"))] - /// Faucet account for free token withdrawal - pub faucet_account: Option
, /// Fee unshielding gas limit pub fee_unshielding_gas_limit: u64, /// Fee unshielding descriptions limit @@ -133,8 +130,6 @@ impl Parameters { pos_gain_d, staked_ratio, pos_inflation_amount, - #[cfg(not(feature = "mainnet"))] - faucet_account, gas_cost, fee_unshielding_gas_limit, fee_unshielding_descriptions_limit, @@ -218,12 +213,6 @@ impl Parameters { let pos_inflation_key = storage::get_pos_inflation_amount_key(); storage.write(&pos_inflation_key, pos_inflation_amount)?; - #[cfg(not(feature = "mainnet"))] - if let Some(faucet_account) = faucet_account { - let faucet_account_key = storage::get_faucet_account_key(); - storage.write(&faucet_account_key, faucet_account)?; - } - let gas_cost_key = storage::get_gas_cost_key(); storage.write(&gas_cost_key, gas_cost)?; @@ -414,18 +403,6 @@ where .into_storage_result() } -#[cfg(not(feature = "mainnet"))] -/// Read the faucet account's address, if any -pub fn read_faucet_account_parameter( - storage: &S, -) -> storage_api::Result> -where - S: StorageRead, -{ - let faucet_account_key = storage::get_faucet_account_key(); - storage.read(&faucet_account_key) -} - /// Read the cost per unit of gas for the provided token pub fn read_gas_cost( storage: &S, @@ -558,10 +535,6 @@ where .ok_or(ReadError::ParametersMissing) .into_storage_result()?; - // read faucet account - #[cfg(not(feature = "mainnet"))] - let faucet_account = read_faucet_account_parameter(storage)?; - // read gas cost let gas_cost_key = storage::get_gas_cost_key(); let value = storage.read(&gas_cost_key)?; @@ -583,8 +556,6 @@ where pos_gain_d, staked_ratio, pos_inflation_amount, - #[cfg(not(feature = "mainnet"))] - faucet_account, gas_cost, fee_unshielding_gas_limit, fee_unshielding_descriptions_limit, diff --git a/core/src/ledger/parameters/storage.rs b/core/src/ledger/parameters/storage.rs index 3e877990164..4b4b85822f8 100644 --- a/core/src/ledger/parameters/storage.rs +++ b/core/src/ledger/parameters/storage.rs @@ -23,8 +23,6 @@ struct Keys { native_erc20: 
&'static str, /// Sub-lkey for storing the Ethereum address of the bridge contract. bridge_contract_address: &'static str, - /// Sub-key for storing the Ethereum address of the governance contract. - governance_contract_address: &'static str, // ======================================== // PoS parameters // ======================================== @@ -43,7 +41,6 @@ struct Keys { vp_whitelist: &'static str, max_proposal_bytes: &'static str, max_block_gas: &'static str, - faucet_account: &'static str, gas_cost: &'static str, fee_unshielding_gas_limit: &'static str, fee_unshielding_descriptions_limit: &'static str, @@ -193,11 +190,6 @@ pub fn get_max_block_gas_key() -> Key { get_max_block_gas_key_at_addr(ADDRESS) } -/// Storage key used for faucet account. -pub fn get_faucet_account_key() -> Key { - get_faucet_account_key_at_addr(ADDRESS) -} - /// Storage key used for the gas cost table pub fn get_gas_cost_key() -> Key { get_gas_cost_key_at_addr(ADDRESS) diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 507c3b4e020..327e8525fa5 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -43,6 +43,7 @@ use crate::types::address::{ }; use crate::types::chain::{ChainId, CHAIN_ID_LENGTH}; use crate::types::hash::{Error as HashError, Hash}; +use crate::types::internal::ExpiredTxsQueue; // TODO #[cfg(feature = "ferveo-tpke")] use crate::types::internal::TxQueue; @@ -104,6 +105,12 @@ where /// Wrapper txs to be decrypted in the next block proposal #[cfg(feature = "ferveo-tpke")] pub tx_queue: TxQueue, + /// Queue of expired transactions that need to be retransmitted. + /// + /// These transactions do not need to be persisted, as they are + /// retransmitted at the **COMMIT** phase immediately following + /// the block when they were queued. + pub expired_txs_queue: ExpiredTxsQueue, /// The latest block height on Ethereum processed, if /// the bridge is enabled. 
pub ethereum_height: Option, @@ -412,6 +419,7 @@ where conversion_state: ConversionState::default(), #[cfg(feature = "ferveo-tpke")] tx_queue: TxQueue::default(), + expired_txs_queue: ExpiredTxsQueue::default(), native_token, ethereum_height: None, eth_events_queue: EthEventsQueue::default(), @@ -1168,6 +1176,7 @@ pub mod testing { conversion_state: ConversionState::default(), #[cfg(feature = "ferveo-tpke")] tx_queue: TxQueue::default(), + expired_txs_queue: ExpiredTxsQueue::default(), native_token: address::nam(), ethereum_height: None, eth_events_queue: EthEventsQueue::default(), @@ -1283,8 +1292,6 @@ mod tests { pos_gain_d: Dec::new(1,1).expect("Cannot fail"), staked_ratio: Dec::new(1,1).expect("Cannot fail"), pos_inflation_amount: token::Amount::zero(), - #[cfg(not(feature = "mainnet"))] - faucet_account: None, fee_unshielding_gas_limit: 20_000, fee_unshielding_descriptions_limit: 15, gas_cost: BTreeMap::default(), diff --git a/core/src/ledger/testnet_pow.rs b/core/src/ledger/testnet_pow.rs deleted file mode 100644 index dcb7c9feb20..00000000000 --- a/core/src/ledger/testnet_pow.rs +++ /dev/null @@ -1,491 +0,0 @@ -//! PoW challenge is used for testnet zero-fee transaction to prevent spam. - -use std::fmt::Display; - -use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_macros::StorageKeys; -use serde::{Deserialize, Serialize}; - -use super::storage_api::collections::{lazy_map, LazyCollection}; -use super::storage_api::{self, StorageRead, StorageWrite}; -use crate::ledger::storage_api::collections::LazyMap; -use crate::types::address::Address; -use crate::types::hash::Hash; -use crate::types::storage::{self, DbKeySeg, Key}; -use crate::types::uint::Uint; - -/// Initialize faucet's storage. This must be called at genesis if faucet -/// account is being used. 
-pub fn init_faucet_storage( - storage: &mut S, - address: &Address, - difficulty: Difficulty, - withdrawal_limit: Uint, -) -> storage_api::Result<()> -where - S: StorageWrite, -{ - write_difficulty(storage, address, difficulty)?; - write_withdrawal_limit(storage, address, withdrawal_limit) -} - -/// Counters are associated with transfer target addresses. -pub type Counter = u64; - -/// A PoW challenge that must be provably solved to withdraw from faucet. -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - PartialEq, - Serialize, - Deserialize, -)] -pub struct Challenge { - /// The address derived from the `WrapperTx`'s signer `pk` field - pub source: Address, - /// Parameters - pub params: ChallengeParams, -} - -/// PoW challenge parameters. -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshSchema, - PartialEq, - Serialize, - Deserialize, -)] -pub struct ChallengeParams { - /// PoW difficulty - pub difficulty: Difficulty, - /// The counter value of the `transfer.target`. - pub counter: Counter, -} - -/// One must find a value for this type to solve a [`Challenge`] that is at -/// least of the matching difficulty of the challenge. -pub type SolutionValue = u64; -/// Size of `SolutionValue` when serialized with borsh -const SOLUTION_VAL_BYTES_LEN: usize = 8; - -/// A [`SolutionValue`] with the [`Challenge`]. -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshSchema, - Serialize, - Deserialize, -)] -pub struct Solution { - /// Challenge params - pub params: ChallengeParams, - /// Solution value, that produces hash with at least `difficulty` leading - /// zeros - pub value: SolutionValue, -} - -impl ChallengeParams { - /// Obtain a PoW challenge for a given transfer. 
- pub fn new( - storage: &mut S, - faucet_address: &Address, - source: &Address, - ) -> storage_api::Result - where - S: StorageRead + StorageWrite, - { - let difficulty = read_difficulty(storage, faucet_address)?; - let counter = get_counter(storage, faucet_address, source)?; - Ok(Self { - difficulty, - counter, - }) - } -} - -impl Challenge { - /// Obtain a PoW challenge for a given transfer. - pub fn new( - storage: &mut S, - faucet_address: &Address, - source: Address, - ) -> storage_api::Result - where - S: StorageRead + StorageWrite, - { - let params = ChallengeParams::new(storage, faucet_address, &source)?; - Ok(Self { source, params }) - } - - /// Try to find a solution to the [`Challenge`]. - pub fn solve(self) -> Solution { - use std::io::Write; - - println!( - "Looking for a solution with difficulty {}...", - self.params.difficulty - ); - let challenge_bytes = self.try_to_vec().expect("Serializable"); - let challenge_len = challenge_bytes.len(); - let mut stdout = std::io::stdout(); - - // Pre-allocate for the bytes - let mut bytes: Vec = - vec![0; challenge_bytes.len() + SOLUTION_VAL_BYTES_LEN]; - - // Set the first part from `challenge_bytes`... 
- for (old, new) in - bytes[0..challenge_len].iter_mut().zip(&challenge_bytes[..]) - { - *old = *new; - } - let mut maybe_solution: SolutionValue = 0; - 'outer: loop { - stdout.flush().unwrap(); - print!("\rChecking {}.", maybe_solution); - let solution_bytes = - maybe_solution.try_to_vec().expect("Serializable"); - // ...and the second part from `solution_bytes` - for (old, new) in - bytes[challenge_len..].iter_mut().zip(&solution_bytes[..]) - { - *old = *new; - } - let hash = Hash::sha256(&bytes); - - // Check if it's a solution - for i in 0..self.params.difficulty.0 as usize { - if hash.0[i] != b'0' { - maybe_solution += 1; - continue 'outer; - } - } - - println!(); - println!("Found a solution: {}.", maybe_solution); - stdout.flush().unwrap(); - return Solution { - params: self.params, - value: maybe_solution, - }; - } - } -} - -impl Solution { - /// Invalidate a solution if it's valid so that it cannot be used again by - /// updating the counter in storage. - pub fn invalidate_if_valid( - &self, - storage: &mut S, - faucet_address: &Address, - source: &Address, - ) -> storage_api::Result - where - S: StorageWrite + StorageRead, - { - if self.validate(storage, faucet_address, source.clone())? { - self.apply_from_tx(storage, faucet_address, source)?; - Ok(true) - } else { - Ok(false) - } - } - - /// Apply a solution from a tx so that it cannot be used again. - pub fn apply_from_tx( - &self, - storage: &mut S, - faucet_address: &Address, - source: &Address, - ) -> storage_api::Result<()> - where - S: StorageWrite + StorageRead, - { - increment_counter(storage, faucet_address, source, self.params.counter) - } - - /// Verify a solution and that the counter has been increment to prevent - /// solution replay. - /// The difficulty of the challenge must match the one set in faucet's - /// storage and the counter value. 
- pub fn validate( - &self, - storage: &S, - faucet_address: &Address, - source: Address, - ) -> storage_api::Result - where - S: StorageRead, - { - let counter = get_counter(storage, faucet_address, &source)?; - // Check that the counter matches expected counter - if self.params.counter != counter { - return Ok(false); - } - // Check that the difficulty matches expected difficulty - let current_difficulty = read_difficulty(storage, faucet_address)?; - if self.params.difficulty != current_difficulty { - return Ok(false); - } - - // Check the solution itself - if !self.verify_solution(source) { - return Ok(false); - } - - Ok(true) - } - - /// Verify that the given solution is correct. Note that this doesn't check - /// the difficulty or the counter. - pub fn verify_solution(&self, source: Address) -> bool { - let challenge = Challenge { - source, - params: self.params.clone(), - }; - let mut bytes = challenge.try_to_vec().expect("Serializable"); - let mut solution_bytes = self.value.try_to_vec().expect("Serializable"); - bytes.append(&mut solution_bytes); - let hash = Hash::sha256(&bytes); - - // Check if it's a solution - for i in 0..challenge.params.difficulty.0 as usize { - if hash.0[i] != b'0' { - return false; - } - } - - true - } -} - -/// Storage keys -#[derive(StorageKeys)] -pub struct Keys { - /// Withdrawal counters associated with recipient addresses. To withdraw - /// tokens from faucet, one must find a solution to a PoW challenge - /// containing the current value of their counter (or `0` is none). - counters: &'static str, - /// PoW difficulty - pow_difficulty: &'static str, - /// withdrawal limit - withdrawal_limit: &'static str, -} - -/// Storage key prefix to the `counters` field. The rest of the key is composed -/// from `LazyMap` stored at this key. 
-pub fn counter_prefix(address: &Address) -> storage::Key { - storage::Key { - segments: vec![ - DbKeySeg::AddressSeg(address.clone()), - DbKeySeg::StringSeg(Keys::VALUES.counters.to_string()), - ], - } -} - -/// Is the storage key for the `counters` field? If so, returns the owner. -pub fn is_counter_key<'a>( - key: &'a storage::Key, - faucet_address: &Address, -) -> Option<&'a Address> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(address), - DbKeySeg::StringSeg(sub_key), - DbKeySeg::StringSeg(data), - DbKeySeg::AddressSeg(owner), - ] if address == faucet_address - && sub_key.as_str() == Keys::VALUES.counters - && data.as_str() == lazy_map::DATA_SUBKEY => - { - Some(owner) - } - _ => None, - } -} - -/// Storage key to the `difficulty` field. -pub fn difficulty_key(address: &Address) -> storage::Key { - storage::Key { - segments: vec![ - DbKeySeg::AddressSeg(address.clone()), - DbKeySeg::StringSeg(Keys::VALUES.pow_difficulty.to_string()), - ], - } -} - -/// Is the storage key for the `difficulty` field? -pub fn is_difficulty_key(key: &storage::Key, faucet_address: &Address) -> bool { - matches!( - &key.segments[..], - [ - DbKeySeg::AddressSeg(address), - DbKeySeg::StringSeg(sub_key), - ] if address == faucet_address && sub_key.as_str() == Keys::VALUES.pow_difficulty, - ) -} - -/// Storage key to the `withdrawal_limit` field. -pub fn withdrawal_limit_key(address: &Address) -> storage::Key { - storage::Key { - segments: vec![ - DbKeySeg::AddressSeg(address.clone()), - DbKeySeg::StringSeg(Keys::VALUES.withdrawal_limit.to_string()), - ], - } -} - -/// Is the storage key for the `withdrawal_limit` field? -pub fn is_withdrawal_limit_key( - key: &storage::Key, - faucet_address: &Address, -) -> bool { - matches!( - &key.segments[..], - [ - DbKeySeg::AddressSeg(address), - DbKeySeg::StringSeg(sub_key), - ] if address == faucet_address && sub_key.as_str() == Keys::VALUES.withdrawal_limit, - ) -} - -/// Read faucet's counter value for a given target address. 
-pub fn get_counter( - storage: &S, - faucet_address: &Address, - source: &Address, -) -> storage_api::Result -where - S: StorageRead, -{ - let counter: Counter = counters_handle(faucet_address) - .get(storage, source)? - // `0` if not previously set - .unwrap_or_default(); - Ok(counter) -} - -/// Increment faucet's counter value for a given source address. -pub fn increment_counter( - storage: &mut S, - faucet_address: &Address, - source: &Address, - current_counter: Counter, -) -> storage_api::Result<()> -where - S: StorageWrite + StorageRead, -{ - counters_handle(faucet_address).insert( - storage, - source.clone(), - current_counter + 1, - )?; - Ok(()) -} - -/// A handle to read/write withdrawal counters -pub fn counters_handle(address: &Address) -> LazyMap { - LazyMap::open(counter_prefix(address)) -} - -/// PoW difficulty (value between `0..=9`). -#[derive( - Copy, - Clone, - Debug, - Default, - BorshSerialize, - BorshDeserialize, - BorshSchema, - Eq, - PartialEq, - PartialOrd, - Ord, - Serialize, - Deserialize, -)] -#[serde(transparent)] -pub struct Difficulty(u8); -impl Difficulty { - /// The value must be between `0..=9` (inclusive upper bound). - pub fn try_new(raw: u8) -> Option { - if raw > 9 { None } else { Some(Self(raw)) } - } -} - -impl Display for Difficulty { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.0.fmt(f) - } -} - -/// Read PoW [`Difficulty`]. -pub fn read_difficulty( - storage: &S, - address: &Address, -) -> storage_api::Result -where - S: StorageRead, -{ - let difficulty = storage - .read(&difficulty_key(address))? - .expect("difficulty must always be set"); - Ok(difficulty) -} - -/// Write PoW [`Difficulty`]. -pub fn write_difficulty( - storage: &mut S, - address: &Address, - difficulty: Difficulty, -) -> storage_api::Result<()> -where - S: StorageWrite, -{ - storage.write(&difficulty_key(address), difficulty) -} - -/// Read the withdrawal limit. 
-pub fn read_withdrawal_limit( - storage: &S, - address: &Address, -) -> storage_api::Result -where - S: StorageRead, -{ - let withdrawal_limit = storage - .read(&withdrawal_limit_key(address))? - .expect("withdrawal_limit must always be set"); - Ok(withdrawal_limit) -} - -/// Write faucet withdrawal limit -pub fn write_withdrawal_limit( - storage: &mut S, - address: &Address, - withdrawal_limit: Uint, -) -> Result<(), storage_api::Error> -where - S: StorageWrite, -{ - storage.write(&withdrawal_limit_key(address), withdrawal_limit) -} - -#[cfg(test)] -mod test { - use super::*; - #[test] - fn test_solution_val_bytes_len() { - let val: SolutionValue = 10; - let bytes = val.try_to_vec().unwrap(); - assert_eq!(bytes.len(), SOLUTION_VAL_BYTES_LEN); - } -} diff --git a/core/src/proto/mod.rs b/core/src/proto/mod.rs index e8411a41d2d..9dfe32e6443 100644 --- a/core/src/proto/mod.rs +++ b/core/src/proto/mod.rs @@ -4,9 +4,9 @@ pub mod generated; mod types; pub use types::{ - Code, Commitment, Data, Dkg, Error, Header, MaspBuilder, MultiSignature, - Section, Signable, SignableEthMessage, Signature, SignatureIndex, Signed, - Tx, TxError, + Code, Commitment, CompressedSignature, Data, Dkg, Error, Header, + MaspBuilder, Section, Signable, SignableEthMessage, Signature, + SignatureIndex, Signed, Signer, Tx, TxError, }; #[cfg(test)] diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index 43da551d871..a6082fbbabb 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -1,6 +1,6 @@ use std::borrow::Cow; use std::cmp::Ordering; -use std::collections::{BTreeSet, HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::convert::TryFrom; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; @@ -25,7 +25,6 @@ use thiserror::Error; use super::generated::types; use crate::ledger::gas::{GasMetering, VpGasMeter, VERIFY_TX_SIG_GAS_COST}; use crate::ledger::storage::{KeccakHasher, Sha256Hasher, StorageHasher}; -use 
crate::ledger::testnet_pow; #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] use crate::tendermint_proto::abci::ResponseDeliverTx; use crate::types::account::AccountPublicKeysMap; @@ -389,15 +388,20 @@ impl Code { PartialEq, )] pub struct SignatureIndex { + pub pubkey: common::PublicKey, + pub index: Option<(Address, u8)>, pub signature: common::Signature, - pub index: u8, } impl SignatureIndex { - pub fn from_single_signature(signature: common::Signature) -> Self { + pub fn from_single_signature( + pubkey: common::PublicKey, + signature: common::Signature, + ) -> Self { Self { + pubkey, signature, - index: 0, + index: None, } } @@ -405,24 +409,6 @@ impl SignatureIndex { vec![self.clone()] } - pub fn verify( - &self, - public_key_index_map: &AccountPublicKeysMap, - data: &impl SignableBytes, - ) -> std::result::Result<(), VerifySigError> { - let public_key = - public_key_index_map.get_public_key_from_index(self.index); - if let Some(public_key) = public_key { - common::SigScheme::verify_signature( - &public_key, - data, - &self.signature, - ) - } else { - Err(VerifySigError::MissingData) - } - } - pub fn serialize(&self) -> String { let signature_bytes = self.try_to_vec().expect("Signature should be serializable"); @@ -444,7 +430,7 @@ impl SignatureIndex { impl Ord for SignatureIndex { fn cmp(&self, other: &Self) -> Ordering { - self.index.cmp(&other.index) + self.pubkey.cmp(&other.pubkey) } } @@ -454,6 +440,23 @@ impl PartialOrd for SignatureIndex { } } +/// Indicates the list of public keys against which signatures will be verified +#[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + BorshSchema, + Serialize, + Deserialize, +)] +pub enum Signer { + /// The address of a multisignature account + Address(Address), + /// The public keys that constitute a signer + PubKeys(Vec), +} + /// A section representing a multisig over another section #[derive( Clone, @@ -464,45 +467,54 @@ impl PartialOrd for SignatureIndex { Serialize, 
Deserialize, )] -pub struct MultiSignature { +pub struct Signature { /// The hash of the section being signed pub targets: Vec, + /// The public keys against which the signatures should be verified + pub signer: Signer, /// The signature over the above hash - pub signatures: BTreeSet, + pub signatures: BTreeMap, } -impl MultiSignature { +impl Signature { /// Sign the given section hash with the given key and return a section pub fn new( targets: Vec, - secret_keys: &[common::SecretKey], - public_keys_index_map: &AccountPublicKeysMap, + secret_keys: BTreeMap, + signer: Option
, ) -> Self { - let target = Self { - targets: targets.clone(), - signatures: BTreeSet::new(), - } - .get_hash(); - - let signatures_public_keys_map = - secret_keys.iter().map(|secret_key: &common::SecretKey| { - let signature = common::SigScheme::sign(secret_key, target); - let public_key = secret_key.ref_to(); - (public_key, signature) - }); - - let signatures = signatures_public_keys_map - .filter_map(|(public_key, signature)| { - let public_key_index = public_keys_index_map - .get_index_from_public_key(&public_key); - public_key_index - .map(|index| SignatureIndex { signature, index }) - }) - .collect::>(); + // If no signer address is given, then derive the signer's public keys + // from the given secret keys. + let signer = if let Some(addr) = signer { + Signer::Address(addr) + } else { + // Make sure the corresponding public keys can be represented by a + // vector instead of a map + assert!( + secret_keys.keys().cloned().eq(0..(secret_keys.len() as u8)), + "secret keys must be enumerateed when signer address is absent" + ); + Signer::PubKeys(secret_keys.values().map(RefTo::ref_to).collect()) + }; - Self { + // Commit to the given targets + let partial = Self { targets, + signer, + signatures: BTreeMap::new(), + }; + let target = partial.get_raw_hash(); + // Turn the map of secret keys into a map of signatures over the + // commitment made above + let signatures = secret_keys + .iter() + .map(|(index, secret_key)| { + (*index, common::SigScheme::sign(secret_key, target)) + }) + .collect(); + Self { signatures, + ..partial } } @@ -528,14 +540,66 @@ impl MultiSignature { pub fn get_raw_hash(&self) -> crate::types::hash::Hash { Self { - signatures: BTreeSet::new(), + signer: Signer::PubKeys(vec![]), + signatures: BTreeMap::new(), ..self.clone() } .get_hash() } + + /// Verify that the signature contained in this section is valid + pub fn verify_signature( + &self, + verified_pks: &mut HashSet, + public_keys_index_map: &AccountPublicKeysMap, + signer: &Option
, + ) -> std::result::Result { + // Records whether there are any successful verifications + let mut verifications = 0; + match &self.signer { + // Verify the signatures against the given public keys if the + // account addresses match + Signer::Address(addr) if Some(addr) == signer.as_ref() => { + for (idx, sig) in &self.signatures { + if let Some(pk) = + public_keys_index_map.get_public_key_from_index(*idx) + { + common::SigScheme::verify_signature( + &pk, + &self.get_raw_hash(), + sig, + )?; + verified_pks.insert(*idx); + verifications += 1; + } + } + } + // If the account addresses do not match, then there is no efficient + // way to map signatures to the given public keys + Signer::Address(_) => {} + // Verify the signatures against the subset of this section's public + // keys that are also in the given map + Signer::PubKeys(pks) => { + for (idx, pk) in pks.iter().enumerate() { + if let Some(map_idx) = + public_keys_index_map.get_index_from_public_key(pk) + { + common::SigScheme::verify_signature( + pk, + &self.get_raw_hash(), + &self.signatures[&(idx as u8)], + )?; + verified_pks.insert(map_idx); + verifications += 1; + } + } + } + } + Ok(verifications) + } } -/// A section representing the signature over another section +/// A section representing a multisig over another section #[derive( Clone, Debug, @@ -545,58 +609,34 @@ impl MultiSignature { Serialize, Deserialize, )] -pub struct Signature { +pub struct CompressedSignature { /// The hash of the section being signed - targets: Vec, - /// The signature over the above hashes - pub signature: Option, -} - -impl Signature { - pub fn new( - targets: Vec, - sec_key: &common::SecretKey, - ) -> Self { - let mut sec = Self { - targets, - signature: None, - }; - sec.signature = Some(common::SigScheme::sign(sec_key, sec.get_hash())); - sec - } - - /// Hash this signature section - pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update( - self.try_to_vec() - .expect("unable to serialize 
signature section"), - ); - hasher - } - - /// Get the hash of this section - pub fn get_hash(&self) -> crate::types::hash::Hash { - crate::types::hash::Hash( - self.hash(&mut Sha256::new()).finalize_reset().into(), - ) - } - - /// Verify that the signature contained in this section is valid - pub fn verify_signature( - &self, - public_key: &common::PublicKey, - ) -> std::result::Result<(), VerifySigError> { - let signature = - self.signature.as_ref().ok_or(VerifySigError::MissingData)?; - common::SigScheme::verify_signature( - public_key, - &Self { - signature: None, - ..self.clone() + pub targets: Vec, + /// The public keys against which the signatures should be verified + pub signer: Signer, + /// The signature over the above hash + pub signatures: BTreeMap, +} + +impl CompressedSignature { + /// Decompress this signature object with respect to the given transaction + /// by looking up the necessary section hashes. Used by constrained hardware + /// wallets. + pub fn expand(self, tx: &Tx) -> Signature { + let mut targets = Vec::new(); + for idx in self.targets { + if idx == 0 { + // The "zeroth" section is the header + targets.push(tx.header_hash()); + } else { + targets.push(tx.sections[idx as usize - 1].get_hash()); } - .get_hash(), - signature, - ) + } + Signature { + targets, + signer: self.signer, + signatures: self.signatures, + } } } @@ -900,8 +940,6 @@ pub enum Section { ExtraData(Code), /// Transaction code. Sending to hardware wallets optional Code(Code), - /// A transaction signature. 
Often produced by hardware wallets - SectionSignature(MultiSignature), /// A transaction header/protocol signature Signature(Signature), /// Ciphertext obtained by encrypting arbitrary transaction sections @@ -933,7 +971,6 @@ impl Section { Self::ExtraData(extra) => extra.hash(hasher), Self::Code(code) => code.hash(hasher), Self::Signature(signature) => signature.hash(hasher), - Self::SectionSignature(signatures) => signatures.hash(hasher), Self::Ciphertext(ct) => ct.hash(hasher), Self::MaspBuilder(mb) => mb.hash(hasher), Self::MaspTx(tx) => { @@ -1005,15 +1042,6 @@ impl Section { } } - /// Extract the section signature from this section if possible - pub fn section_signature(&self) -> Option { - if let Self::SectionSignature(data) = self { - Some(data.clone()) - } else { - None - } - } - /// Extract the ciphertext from this section if possible pub fn ciphertext(&self) -> Option { if let Self::Ciphertext(data) = self { @@ -1346,68 +1374,73 @@ impl Tx { /// Verify that the section with the given hash has been signed by the given /// public key - pub fn verify_section_signatures( + pub fn verify_signatures( &self, hashes: &[crate::types::hash::Hash], public_keys_index_map: AccountPublicKeysMap, + signer: &Option
, threshold: u8, max_signatures: Option, - gas_meter: &mut VpGasMeter, - ) -> std::result::Result<(), Error> { + mut gas_meter: Option<&mut VpGasMeter>, + ) -> std::result::Result, Error> { let max_signatures = max_signatures.unwrap_or(u8::MAX); - let mut valid_signatures = 0; + // Records the public key indices used in successful signatures + let mut verified_pks = HashSet::new(); + // Records the sections instrumental in verifying signatures + let mut witnesses = Vec::new(); for section in &self.sections { - if let Section::SectionSignature(signatures) = section { - if !hashes.iter().all(|x| { + if let Section::Signature(signatures) = section { + // Check that the hashes being checked are a subset of those in + // this section. Also ensure that all the sections the signature + // signs over are present. + if hashes.iter().all(|x| { signatures.targets.contains(x) || section.get_hash() == *x - }) { - return Err(Error::InvalidSectionSignature( - "missing target hash.".to_string(), - )); - } - - for target in &signatures.targets { - if self.get_section(target).is_none() { + }) && signatures + .targets + .iter() + .all(|x| self.get_section(x).is_some()) + { + if signatures.total_signatures() > max_signatures { return Err(Error::InvalidSectionSignature( - "Missing target section.".to_string(), + "too many signatures.".to_string(), )); } - } - - if signatures.total_signatures() > max_signatures { - return Err(Error::InvalidSectionSignature( - "too many signatures.".to_string(), - )); - } - - if signatures.total_signatures() < threshold { - return Err(Error::InvalidSectionSignature( - "too few signatures.".to_string(), - )); - } - for signature_index in &signatures.signatures { - let is_valid_signature = signature_index - .verify( + // Finally verify that the signature itself is valid + let prev_verifieds = verified_pks.len(); + let amt_verifieds = signatures + .verify_signature( + &mut verified_pks, &public_keys_index_map, - &signatures.get_raw_hash(), + signer, ) - 
.is_ok(); - gas_meter - .consume(VERIFY_TX_SIG_GAS_COST) - .map_err(|_| Error::OutOfGas)?; - if is_valid_signature { - valid_signatures += 1; + .map_err(|_| { + Error::InvalidSectionSignature( + "found invalid signature.".to_string(), + ) + }); + // Compute the cost of the signature verifications + if let Some(x) = gas_meter.as_mut() { + let amt_verified = usize::from(amt_verifieds.is_err()) + + verified_pks.len() + - prev_verifieds; + x.consume(VERIFY_TX_SIG_GAS_COST * amt_verified as u64) + .map_err(|_| Error::OutOfGas)?; + } + // Record the section witnessing these signatures + if amt_verifieds? > 0 { + witnesses.push(signatures); } - if valid_signatures >= threshold { - return Ok(()); + // Short-circuit these checks if the threshold is exceeded + if verified_pks.len() >= threshold.into() { + return Ok(witnesses); } } } } Err(Error::InvalidSectionSignature( - "invalid signatures.".to_string(), + "signature threshold not met.".to_string(), )) } @@ -1419,31 +1452,16 @@ impl Tx { public_key: &common::PublicKey, hashes: &[crate::types::hash::Hash], ) -> Result<&Signature> { - for section in &self.sections { - if let Section::Signature(signature) = section { - // Check that the hashes being - // checked are a subset of those in this section - if hashes.iter().all(|x| { - signature.targets.contains(x) || section.get_hash() == *x - }) { - // Ensure that all the sections the signature signs over are - // present - for target in &signature.targets { - if self.get_section(target).is_none() { - return Err(Error::InvalidSectionSignature( - "Target section is missing.".to_string(), - )); - } - } - // Finally verify that the signature itself is valid - return signature - .verify_signature(public_key) - .map(|_| signature) - .map_err(|_| Error::InvalidWrapperSignature); - } - } - } - Err(Error::InvalidWrapperSignature) + self.verify_signatures( + hashes, + AccountPublicKeysMap::from_iter([public_key.clone()].into_iter()), + &None, + 1, + None, + None, + ) + .map(|x| 
*x.first().unwrap()) + .map_err(|_| Error::InvalidWrapperSignature) } /// Validate any and all ciphertexts stored in this transaction @@ -1466,10 +1484,38 @@ impl Tx { &self, secret_keys: &[common::SecretKey], public_keys_index_map: &AccountPublicKeysMap, - ) -> BTreeSet { + signer: Option
, + ) -> Vec { let targets = self.inner_section_targets(); - MultiSignature::new(targets, secret_keys, public_keys_index_map) - .signatures + let mut signatures = Vec::new(); + let section = Signature::new( + targets, + public_keys_index_map.index_secret_keys(secret_keys.to_vec()), + signer, + ); + match section.signer { + Signer::Address(addr) => { + for (idx, signature) in section.signatures { + signatures.push(SignatureIndex { + pubkey: public_keys_index_map + .get_public_key_from_index(idx) + .unwrap(), + index: Some((addr.clone(), idx)), + signature, + }); + } + } + Signer::PubKeys(pub_keys) => { + for (idx, signature) in section.signatures { + signatures.push(SignatureIndex { + pubkey: pub_keys[idx as usize].clone(), + index: None, + signature, + }); + } + } + } + signatures } /// Decrypt any and all ciphertexts stored in this transaction use the @@ -1500,7 +1546,6 @@ impl Tx { /// signatures over it #[cfg(feature = "ferveo-tpke")] pub fn encrypt(&mut self, pubkey: &EncryptionKey) -> &mut Self { - use crate::types::hash::Hash; let header_hash = self.header_hash(); let mut plaintexts = vec![]; // Iterate backwrds to sidestep the effects of deletion on indexing @@ -1508,7 +1553,7 @@ impl Tx { match &self.sections[i] { Section::Signature(sig) if sig.targets.contains(&header_hash) => {} - Section::MaspTx(_) => { + masp_section @ Section::MaspTx(_) => { // Do NOT encrypt the fee unshielding transaction if let Some(unshield_section_hash) = self .header() @@ -1516,14 +1561,7 @@ impl Tx { .expect("Tried to encrypt a non-wrapper tx") .unshield_section_hash { - if unshield_section_hash - == Hash( - self.sections[i] - .hash(&mut Sha256::new()) - .finalize_reset() - .into(), - ) - { + if unshield_section_hash == masp_section.get_hash() { continue; } } @@ -1696,9 +1734,6 @@ impl Tx { fee_payer: common::PublicKey, epoch: Epoch, gas_limit: GasLimit, - #[cfg(not(feature = "mainnet"))] requires_pow: Option< - testnet_pow::Solution, - >, fee_unshield_hash: Option, ) -> &mut 
Self { self.header.tx_type = TxType::Wrapper(Box::new(WrapperTx::new( @@ -1706,8 +1741,6 @@ impl Tx { fee_payer, epoch, gas_limit, - #[cfg(not(feature = "mainnet"))] - requires_pow, fee_unshield_hash, ))); self @@ -1718,7 +1751,8 @@ impl Tx { self.protocol_filter(); self.add_section(Section::Signature(Signature::new( self.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); self } @@ -1728,27 +1762,53 @@ impl Tx { &mut self, keypairs: Vec, account_public_keys_map: AccountPublicKeysMap, + signer: Option
, ) -> &mut Self { self.protocol_filter(); let hashes = self.inner_section_targets(); - self.add_section(Section::SectionSignature(MultiSignature::new( + self.add_section(Section::Signature(Signature::new( hashes, - &keypairs, - &account_public_keys_map, + account_public_keys_map.index_secret_keys(keypairs), + signer, ))); self } - /// Add signature + /// Add signatures pub fn add_signatures( &mut self, - signatures: BTreeSet, + signatures: Vec, ) -> &mut Self { self.protocol_filter(); - self.add_section(Section::SectionSignature(MultiSignature { + let mut pk_section = Signature { targets: self.inner_section_targets(), - signatures, - })); + signatures: BTreeMap::new(), + signer: Signer::PubKeys(vec![]), + }; + let mut sections = HashMap::new(); + // Put the supplied signatures into the correct sections + for signature in signatures { + if let Some((addr, idx)) = &signature.index { + // Add the signature under the given multisig address + let section = + sections.entry(addr.clone()).or_insert_with(|| Signature { + targets: self.inner_section_targets(), + signatures: BTreeMap::new(), + signer: Signer::Address(addr.clone()), + }); + section.signatures.insert(*idx, signature.signature); + } else if let Signer::PubKeys(pks) = &mut pk_section.signer { + // Add the signature under its corresponding public key + pk_section + .signatures + .insert(pks.len() as u8, signature.signature); + pks.push(signature.pubkey); + } + } + for section in std::iter::once(pk_section).chain(sections.into_values()) + { + self.add_section(Section::Signature(section)); + } self } } diff --git a/core/src/types/account.rs b/core/src/types/account.rs index d66876ba37b..83f0b0aafea 100644 --- a/core/src/types/account.rs +++ b/core/src/types/account.rs @@ -1,12 +1,12 @@ //! 
Helper structures to manage accounts -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Serialize}; use super::address::Address; -use super::key::common; +use super::key::{common, RefTo}; #[derive( Debug, Clone, BorshSerialize, BorshDeserialize, Serialize, Deserialize, @@ -89,4 +89,18 @@ impl AccountPublicKeysMap { ) -> Option { self.pk_to_idx.get(public_key).cloned() } + + /// Index the given set of secret keys + pub fn index_secret_keys( + &self, + secret_keys: Vec, + ) -> BTreeMap { + secret_keys + .into_iter() + .filter_map(|secret_key: common::SecretKey| { + self.get_index_from_public_key(&secret_key.ref_to()) + .map(|index| (index, secret_key)) + }) + .collect() + } } diff --git a/core/src/types/eth_abi.rs b/core/src/types/eth_abi.rs index adda87e6c95..27d2b93db17 100644 --- a/core/src/types/eth_abi.rs +++ b/core/src/types/eth_abi.rs @@ -198,17 +198,17 @@ mod tests { ) .expect("Test failed"), ], - voting_powers: vec![8828299.try_into().unwrap()], + voting_powers: vec![8828299u64.into()], epoch: 0.into(), }; let encoded = valset_update.encode().into_inner(); let encoded = HEXLOWER.encode(&encoded); - let expected = "00000000000000000000000000000000000000000000000000000000000000200000\ - 00000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000\ - 000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000\ - 00000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000\ - 241d37b7cf5233b3b0b204321420a86e8f7bfdb50000000000000000000000000000000000000000000000000000\ - 000000000001000000000000000000000000000000000000000000000000000000000086b58b"; + let expected = "000000000000000000000000000000000000000000000000000000000000002\ + 000000000000000000000000000000000000000000000000000000000000000\ + 400000000000000000000000000000000000000000000000000000000000000\ + 
000000000000000000000000000000000000000000000000000000000000000\ + 0001241d37b7cf5233b3b0b204321420a86e8f7bfdb50000000000000000008\ + 6b58b"; assert_eq!(expected, encoded); } } diff --git a/core/src/types/eth_bridge_pool.rs b/core/src/types/eth_bridge_pool.rs index 86b08155095..0f1a8873452 100644 --- a/core/src/types/eth_bridge_pool.rs +++ b/core/src/types/eth_bridge_pool.rs @@ -210,12 +210,12 @@ impl PendingTransfer { impl From<&PendingTransfer> for ethbridge_structs::Erc20Transfer { fn from(pending: &PendingTransfer) -> Self { - let HashDigest(namada_data_digest) = pending.appendix().checksum(); + let HashDigest(data_digest) = pending.appendix().checksum(); Self { from: pending.transfer.asset.0.into(), to: pending.transfer.recipient.0.into(), amount: pending.transfer.amount.into(), - namada_data_digest, + data_digest, } } } diff --git a/core/src/types/ethereum_events.rs b/core/src/types/ethereum_events.rs index 57c8047fdd5..8dce0a39a42 100644 --- a/core/src/types/ethereum_events.rs +++ b/core/src/types/ethereum_events.rs @@ -235,8 +235,6 @@ pub struct TransfersToNamada { pub nonce: Uint, /// The batch of transfers pub transfers: Vec, - /// The indices of the transfers which succeeded or failed - pub valid_transfers_map: Vec, } impl GetEventNonce for TransfersToNamada { @@ -249,16 +247,8 @@ impl GetEventNonce for TransfersToNamada { impl From for EthereumEvent { #[inline] fn from(event: TransfersToNamada) -> Self { - let TransfersToNamada { - nonce, - transfers, - valid_transfers_map, - } = event; - Self::TransfersToNamada { - nonce, - transfers, - valid_transfers_map, - } + let TransfersToNamada { nonce, transfers } = event; + Self::TransfersToNamada { nonce, transfers } } } @@ -285,9 +275,6 @@ pub enum EthereumEvent { /// The batch of transfers #[allow(dead_code)] transfers: Vec, - /// The indices of the transfers which succeeded or failed - #[allow(dead_code)] - valid_transfers_map: Vec, }, /// A confirmation event that a batch of transfers have been made /// 
from Namada to Ethereum @@ -298,9 +285,6 @@ pub enum EthereumEvent { /// The batch of transfers #[allow(dead_code)] transfers: Vec, - /// The indices of the transfers which succeeded or failed - #[allow(dead_code)] - valid_transfers_map: Vec, /// The Namada address that receives the gas fees /// for relaying a batch of transfers #[allow(dead_code)] @@ -319,25 +303,6 @@ pub enum EthereumEvent { #[allow(dead_code)] governance_validator_hash: KeccakHash, }, - /// Event indication that a new smart contract has been - /// deployed - NewContract { - /// Name of the contract - #[allow(dead_code)] - name: String, - /// Address of the contract on Ethereum - #[allow(dead_code)] - address: EthAddress, - }, - /// Event indicating that a smart contract has been updated - UpgradedContract { - /// Name of the contract - #[allow(dead_code)] - name: String, - /// Address of the contract on Ethereum - #[allow(dead_code)] - address: EthAddress, - }, } impl EthereumEvent { @@ -417,7 +382,7 @@ impl From for TransferToEthereum { }, asset: EthAddress(transfer.from.0), receiver: EthAddress(transfer.to.0), - checksum: Hash(transfer.namada_data_digest), + checksum: Hash(transfer.data_digest), } } } @@ -530,7 +495,6 @@ pub mod testing { asset: arbitrary_eth_address(), receiver, }], - valid_transfers_map: vec![true], } } } diff --git a/core/src/types/internal.rs b/core/src/types/internal.rs index b5a2c430922..bcb4c20817e 100644 --- a/core/src/types/internal.rs +++ b/core/src/types/internal.rs @@ -2,6 +2,8 @@ use borsh::{BorshDeserialize, BorshSerialize}; +use crate::types::ethereum_events::EthereumEvent; + /// A result of a wasm call to host functions that may fail. 
#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum HostEnvResult { @@ -61,12 +63,6 @@ mod tx_queue { /// This allows for a more detailed logging about the gas used by the /// wrapper and that used by the inner pub gas: Gas, - #[cfg(not(feature = "mainnet"))] - /// A PoW solution can be used to allow zero-fee testnet - /// transactions. - /// This is true when the wrapper of this tx contains a valid - /// `testnet_pow::Solution` - pub has_valid_pow: bool, } #[derive(Default, Debug, Clone, BorshDeserialize, BorshSerialize)] @@ -105,3 +101,30 @@ mod tx_queue { #[cfg(feature = "ferveo-tpke")] pub use tx_queue::{TxInQueue, TxQueue}; + +/// Expired transaction kinds. +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] +pub enum ExpiredTx { + /// Broadcast the given Ethereum event. + EthereumEvent(EthereumEvent), +} + +/// Queue of expired transactions that need to be retransmitted. +#[derive(Default, Clone, Debug, BorshSerialize, BorshDeserialize)] +pub struct ExpiredTxsQueue { + inner: Vec, +} + +impl ExpiredTxsQueue { + /// Push a new transaction to the back of the queue. + #[inline] + pub fn push(&mut self, tx: ExpiredTx) { + self.inner.push(tx); + } + + /// Consume all the transactions in the queue. + #[inline] + pub fn drain(&mut self) -> impl Iterator + '_ { + self.inner.drain(..) + } +} diff --git a/core/src/types/storage.rs b/core/src/types/storage.rs index b6467b8a000..4aae00b42e5 100644 --- a/core/src/types/storage.rs +++ b/core/src/types/storage.rs @@ -497,6 +497,14 @@ impl Key { Ok(Key { segments }) } + /// Takes ownership of the key, appends a new segment to it, + /// and returns the modified key. 
+ #[must_use] + pub fn with_segment(mut self, other: T) -> Self { + self.segments.push(other.to_db_key()); + self + } + /// Returns a new key with segments of `Self` and the given key pub fn join(&self, other: &Key) -> Self { let mut segments = self.segments.clone(); @@ -1482,7 +1490,6 @@ mod tests { let mut queue = EthEventsQueue::default(); queue.transfers_to_namada.next_nonce_to_process = 2u64.into(); let new_event = TransfersToNamada { - valid_transfers_map: vec![], transfers: vec![], nonce: 2u64.into(), }; @@ -1502,7 +1509,6 @@ mod tests { let mut queue = EthEventsQueue::default(); queue.transfers_to_namada.next_nonce_to_process = 3u64.into(); let new_event = TransfersToNamada { - valid_transfers_map: vec![], transfers: vec![], nonce: 2u64.into(), }; @@ -1517,27 +1523,22 @@ mod tests { queue.transfers_to_namada.next_nonce_to_process = 1u64.into(); let new_event_1 = TransfersToNamada { - valid_transfers_map: vec![], transfers: vec![], nonce: 1u64.into(), }; let new_event_2 = TransfersToNamada { - valid_transfers_map: vec![], transfers: vec![], nonce: 2u64.into(), }; let new_event_3 = TransfersToNamada { - valid_transfers_map: vec![], transfers: vec![], nonce: 3u64.into(), }; let new_event_4 = TransfersToNamada { - valid_transfers_map: vec![], transfers: vec![], nonce: 4u64.into(), }; let new_event_7 = TransfersToNamada { - valid_transfers_map: vec![], transfers: vec![], nonce: 7u64.into(), }; diff --git a/core/src/types/token.rs b/core/src/types/token.rs index 3201684e0f7..0ee60b43264 100644 --- a/core/src/types/token.rs +++ b/core/src/types/token.rs @@ -190,9 +190,13 @@ impl Amount { denom: impl Into, ) -> Result { let denom = denom.into(); + let uint = uint.into(); + if denom == 0 { + return Ok(Self { raw: uint }); + } match Uint::from(10) .checked_pow(Uint::from(denom)) - .and_then(|scaling| scaling.checked_mul(uint.into())) + .and_then(|scaling| scaling.checked_mul(uint)) { Some(amount) => Ok(Self { raw: amount }), None => 
Err(AmountParseError::ConvertToDecimal), diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index 59b9965160b..bbebc85e777 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -24,17 +24,7 @@ pub mod decrypted_tx { /// other validators to verify pub enum DecryptedTx { /// The decrypted payload - Decrypted { - #[cfg(not(feature = "mainnet"))] - /// A PoW solution can be used to allow zero-fee testnet - /// transactions. - /// This is true when the wrapper of this tx contains a valid - /// `testnet_pow::Solution`. - // For some reason, we get `warning: fields `tx` and - // `has_valid_pow` are never read` even though they are being used! - #[allow(dead_code)] - has_valid_pow: bool, - }, + Decrypted, /// The wrapper whose payload could not be decrypted Undecryptable, } diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 6b00e4dfcc7..8acb9e6c7ef 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -235,7 +235,8 @@ mod test_process_tx { .clone(); tx.add_section(Section::Signature(Signature::new( vec![*tx.code_sechash(), *tx.data_sechash()], - &gen_keypair(), + [(0, gen_keypair())].into_iter().collect(), + None, ))); tx.validate_tx().expect("Test failed"); @@ -263,15 +264,14 @@ mod test_process_tx { keypair.ref_to(), Epoch(0), Default::default(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); tx.set_code(Code::new("wasm code".as_bytes().to_owned())); tx.set_data(Data::new("transaction data".as_bytes().to_owned())); tx.add_section(Section::Signature(Signature::new( tx.sechashes(), - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); tx.validate_tx().expect("Test failed"); @@ -300,8 +300,6 @@ mod test_process_tx { keypair.ref_to(), Epoch(0), Default::default(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); tx.set_code(Code::new("wasm code".as_bytes().to_owned())); @@ -316,10 +314,7 
@@ mod test_process_tx { #[test] fn test_process_tx_decrypted_unsigned() { use crate::proto::{Code, Data, Tx}; - let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - })); + let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); let code_sec = tx .set_code(Code::new("transaction data".as_bytes().to_owned())) .clone(); @@ -328,10 +323,7 @@ fn test_process_tx_decrypted_unsigned() { .clone(); tx.validate_tx().expect("Test failed"); match tx.header().tx_type { - TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: _, - }) => { + TxType::Decrypted(DecryptedTx::Decrypted) => { assert_eq!(tx.header().code_hash, code_sec.get_hash(),); assert_eq!(tx.header().data_hash, data_sec.get_hash(),); } @@ -357,16 +349,18 @@ fn test_process_tx_decrypted_signed() { use crate::types::key::Signature as S; let mut decrypted = - Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - })); + Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); // Invalid signed data let ed_sig = ed25519::Signature::try_from_slice([0u8; 64].as_ref()).unwrap(); - let mut sig_sec = - Signature::new(vec![decrypted.header_hash()], &gen_keypair()); - sig_sec.signature = Some(common::Signature::try_from_sig(&ed_sig).unwrap()); + let mut sig_sec = Signature::new( + vec![decrypted.header_hash()], + [(0, gen_keypair())].into_iter().collect(), + None, + ); + sig_sec + .signatures + .insert(0, common::Signature::try_from_sig(&ed_sig).unwrap()); decrypted.add_section(Section::Signature(sig_sec)); // create the tx with signed decrypted data let code_sec = decrypted @@ -377,10 +371,7 @@ fn test_process_tx_decrypted_signed() { .clone(); decrypted.validate_tx().expect("Test failed"); match decrypted.header().tx_type { - TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: _, - }) => 
{ + TxType::Decrypted(DecryptedTx::Decrypted) => { assert_eq!(decrypted.header.code_hash, code_sec.get_hash()); assert_eq!(decrypted.header.data_hash, data_sec.get_hash()); } diff --git a/core/src/types/transaction/protocol.rs b/core/src/types/transaction/protocol.rs index aa9abe8cec8..1a51434b296 100644 --- a/core/src/types/transaction/protocol.rs +++ b/core/src/types/transaction/protocol.rs @@ -201,7 +201,8 @@ mod protocol_txs { outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( outer_tx.sechashes(), - signing_key, + [(0, signing_key.clone())].into_iter().collect(), + None, ))); outer_tx } @@ -339,7 +340,8 @@ mod protocol_txs { *outer_tx.code_sechash(), *outer_tx.data_sechash(), ], - signing_key, + [(0, signing_key.clone())].into_iter().collect(), + None, ))); outer_tx } diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index bda7142220a..e9b49b0c07c 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -15,7 +15,6 @@ pub mod wrapper_tx { use sha2::{Digest, Sha256}; use thiserror::Error; - use crate::ledger::testnet_pow; use crate::proto::{Code, Data, Section, Tx}; use crate::types::address::{masp, Address}; use crate::types::hash::Hash; @@ -191,9 +190,6 @@ pub mod wrapper_tx { /// The hash of the optional, unencrypted, unshielding transaction for /// fee payment pub unshield_section_hash: Option, - #[cfg(not(feature = "mainnet"))] - /// A PoW solution can be used to allow zero-fee testnet transactions - pub pow_solution: Option, } impl WrapperTx { @@ -207,9 +203,6 @@ pub mod wrapper_tx { pk: common::PublicKey, epoch: Epoch, gas_limit: GasLimit, - #[cfg(not(feature = "mainnet"))] pow_solution: Option< - testnet_pow::Solution, - >, unshield_hash: Option, ) -> WrapperTx { Self { @@ -218,8 +211,6 @@ pub mod wrapper_tx { epoch, gas_limit, unshield_section_hash: unshield_hash, - #[cfg(not(feature = "mainnet"))] - pow_solution, } } @@ -293,10 
+284,7 @@ pub mod wrapper_tx { ) -> Result { let mut tx = Tx::from_type(crate::types::transaction::TxType::Decrypted( - crate::types::transaction::DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - }, + crate::types::transaction::DecryptedTx::Decrypted, )); let masp_section = tx.add_section(Section::MaspTx(unshield)); let masp_hash = Hash( @@ -441,8 +429,6 @@ pub mod wrapper_tx { keypair.ref_to(), Epoch(0), Default::default(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.set_code(Code::new("wasm code".as_bytes().to_owned())); @@ -452,7 +438,8 @@ pub mod wrapper_tx { encrypted_tx.encrypt(&Default::default()); wrapper.add_section(Section::Signature(Signature::new( vec![wrapper.header_hash(), wrapper.sections[0].get_hash()], - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); assert!(encrypted_tx.validate_ciphertext()); let privkey = ::G2Affine::prime_subgroup_generator(); @@ -476,8 +463,6 @@ pub mod wrapper_tx { keypair.ref_to(), Epoch(0), Default::default(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); wrapper.set_code(Code::new("wasm code".as_bytes().to_owned())); @@ -489,7 +474,8 @@ pub mod wrapper_tx { wrapper.encrypt(&Default::default()); wrapper.add_section(Section::Signature(Signature::new( vec![wrapper.header_hash(), wrapper.sections[0].get_hash()], - &keypair, + [(0, keypair)].into_iter().collect(), + None, ))); assert!(wrapper.validate_ciphertext()); let privkey = ::G2Affine::prime_subgroup_generator(); @@ -514,8 +500,6 @@ pub mod wrapper_tx { keypair.ref_to(), Epoch(0), Default::default(), - #[cfg(not(feature = "mainnet"))] - None, None, )))); @@ -523,7 +507,8 @@ pub mod wrapper_tx { tx.set_data(Data::new("transaction data".as_bytes().to_owned())); tx.add_section(Section::Signature(Signature::new( tx.sechashes(), - &keypair, + [(0, keypair.clone())].into_iter().collect(), + None, ))); // we now try to alter the inner tx maliciously diff --git 
a/core/src/types/vote_extensions/ethereum_events.rs b/core/src/types/vote_extensions/ethereum_events.rs index 863211a18d7..efd019c76b1 100644 --- a/core/src/types/vote_extensions/ethereum_events.rs +++ b/core/src/types/vote_extensions/ethereum_events.rs @@ -155,16 +155,15 @@ mod tests { let event = EthereumEvent::TransfersToNamada { nonce, transfers: vec![], - valid_transfers_map: vec![], }; let hash = event.hash().unwrap(); assert_eq!( hash, Hash([ - 237, 76, 45, 220, 228, 238, 146, 153, 170, 10, 70, 130, 32, 16, - 67, 66, 231, 34, 223, 166, 173, 203, 204, 195, 54, 19, 165, - 119, 63, 252, 187, 132 + 94, 131, 116, 129, 41, 204, 178, 144, 24, 8, 185, 16, 103, 236, + 209, 191, 20, 89, 145, 17, 41, 233, 31, 98, 185, 6, 217, 204, + 80, 38, 224, 23 ]) ); } @@ -181,12 +180,10 @@ mod tests { let ev_1 = EthereumEvent::TransfersToNamada { nonce: 1u64.into(), transfers: vec![], - valid_transfers_map: vec![], }; let ev_2 = EthereumEvent::TransfersToEthereum { nonce: 2u64.into(), transfers: vec![], - valid_transfers_map: vec![], relayer: address::testing::established_address_1(), }; diff --git a/core/src/types/vote_extensions/validator_set_update.rs b/core/src/types/vote_extensions/validator_set_update.rs index 5a9324fec6b..bf3d749ff5d 100644 --- a/core/src/types/vote_extensions/validator_set_update.rs +++ b/core/src/types/vote_extensions/validator_set_update.rs @@ -4,7 +4,6 @@ use std::cmp::Ordering; use std::collections::HashMap; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use ethabi::ethereum_types as ethereum; use crate::proto::Signed; use crate::types::address::Address; @@ -165,16 +164,16 @@ pub trait VotingPowersMapExt { /// sorted in descending order by voting power, as this is more efficient to /// deal with on the Ethereum side when working out if there is enough /// voting power for a given validator set update. 
- fn get_abi_encoded(&self) -> (Vec, Vec, Vec) { + fn get_abi_encoded(&self) -> (Vec, Vec) { let sorted = self.get_sorted(); let total_voting_power: token::Amount = sorted.iter().map(|&(_, &voting_power)| voting_power).sum(); - // split the vec into three portions - sorted.into_iter().fold( - Default::default(), - |accum, (addr_book, &voting_power)| { + // split the vec into two portions + sorted + .into_iter() + .map(|(addr_book, &voting_power)| { let voting_power: EthBridgeVotingPower = FractionalVotingPower::new( voting_power.into(), @@ -186,22 +185,23 @@ pub trait VotingPowersMapExt { ) .into(); - let (mut hot_key_addrs, mut cold_key_addrs, mut voting_powers) = - accum; let &EthAddrBook { - hot_key_addr: EthAddress(hot_key_addr), - cold_key_addr: EthAddress(cold_key_addr), + hot_key_addr, + cold_key_addr, } = addr_book; - hot_key_addrs - .push(Token::Address(ethereum::H160(hot_key_addr))); - cold_key_addrs - .push(Token::Address(ethereum::H160(cold_key_addr))); - voting_powers.push(Token::Uint(voting_power.into())); - - (hot_key_addrs, cold_key_addrs, voting_powers) - }, - ) + ( + Token::FixedBytes( + encode_validator_data(hot_key_addr, voting_power) + .into(), + ), + Token::FixedBytes( + encode_validator_data(cold_key_addr, voting_power) + .into(), + ), + ) + }) + .unzip() } /// Returns the bridge and governance keccak hashes of @@ -211,13 +211,11 @@ pub trait VotingPowersMapExt { &self, next_epoch: Epoch, ) -> (KeccakHash, KeccakHash) { - let (hot_key_addrs, cold_key_addrs, voting_powers) = - self.get_abi_encoded(); + let (bridge_validators, governance_validators) = self.get_abi_encoded(); valset_upd_toks_to_hashes( next_epoch, - hot_key_addrs, - cold_key_addrs, - voting_powers, + bridge_validators, + governance_validators, ) } } @@ -227,23 +225,20 @@ pub trait VotingPowersMapExt { /// voting powers, normalized to `2^32`. 
pub fn valset_upd_toks_to_hashes( next_epoch: Epoch, - hot_key_addrs: Vec, - cold_key_addrs: Vec, - voting_powers: Vec, + bridge_validators: Vec, + governance_validators: Vec, ) -> (KeccakHash, KeccakHash) { let bridge_hash = compute_hash( next_epoch, BRIDGE_CONTRACT_VERSION, BRIDGE_CONTRACT_NAMESPACE, - hot_key_addrs, - voting_powers.clone(), + bridge_validators, ); let governance_hash = compute_hash( next_epoch, GOVERNANCE_CONTRACT_VERSION, GOVERNANCE_CONTRACT_NAMESPACE, - cold_key_addrs, - voting_powers, + governance_validators, ); (bridge_hash, governance_hash) } @@ -286,21 +281,46 @@ fn compute_hash( contract_version: u8, contract_namespace: &str, validators: Vec, - voting_powers: Vec, ) -> KeccakHash { AbiEncode::keccak256(&[ Token::Uint(contract_version.into()), Token::String(contract_namespace.into()), Token::Array(validators), - Token::Array(voting_powers), epoch_to_token(next_epoch), ]) } +/// Given a validator's [`EthAddress`] and its respective +/// [`EthBridgeVotingPower`], return an encoded representation +/// of this data, understood by the smart contract. +#[inline] +fn encode_validator_data( + address: EthAddress, + voting_power: EthBridgeVotingPower, +) -> [u8; 32] { + let address = address.0; + let voting_power = u128::from(voting_power).to_be_bytes(); + + let mut buffer = [0u8; 32]; + buffer[..20].copy_from_slice(&address); + buffer[20..].copy_from_slice(&voting_power[4..]); + + buffer +} + /// Struct for serializing validator set /// arguments with ABI for Ethereum smart /// contracts. -#[derive(Debug, Clone, Default, Eq, PartialEq)] +#[derive( + Debug, + Clone, + Default, + Eq, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshSchema, +)] // TODO: find a new home for this type pub struct ValidatorSetArgs { /// Ethereum addresses of the validators. 
@@ -322,13 +342,10 @@ impl From for ethbridge_structs::ValidatorSetArgs { epoch, } = valset; ethbridge_structs::ValidatorSetArgs { - validators: validators - .into_iter() - .map(|addr| addr.0.into()) - .collect(), - powers: voting_powers + validator_set: validators .into_iter() - .map(|power| u64::from(power).into()) + .zip(voting_powers.into_iter()) + .map(|(addr, power)| encode_validator_data(addr, power)) .collect(), nonce: epoch.0.into(), } @@ -337,20 +354,17 @@ impl From for ethbridge_structs::ValidatorSetArgs { impl Encode<1> for ValidatorSetArgs { fn tokenize(&self) -> [Token; 1] { - let addrs = Token::Array( + let validator_set = Token::Array( self.validators .iter() - .map(|addr| Token::Address(addr.0.into())) - .collect(), - ); - let powers = Token::Array( - self.voting_powers - .iter() - .map(|&power| Token::Uint(power.into())) + .zip(self.voting_powers.iter()) + .map(|(&addr, &power)| { + Token::FixedBytes(encode_validator_data(addr, power).into()) + }) .collect(), ); let nonce = Token::Uint(self.epoch.0.into()); - [Token::Tuple(vec![addrs, powers, nonce])] + [Token::Tuple(vec![validator_set, nonce])] } } @@ -384,7 +398,7 @@ mod tag { ext.voting_powers.get_bridge_and_gov_hashes(next_epoch); AbiEncode::signable_keccak256(&[ Token::Uint(GOVERNANCE_CONTRACT_VERSION.into()), - Token::String("updateValidatorsSet".into()), + Token::String("updateValidatorSet".into()), Token::FixedBytes(bridge_hash.to_vec()), Token::FixedBytes(gov_hash.to_vec()), epoch_to_token(next_epoch), @@ -409,11 +423,11 @@ mod tests { // const ethers = require('ethers'); // const keccak256 = require('keccak256') // - // const abiEncoder = new ethers.utils.AbiCoder(); + // const abiEncoder = new ethers.AbiCoder(); // // const output = abiEncoder.encode( - // ['uint256', 'string', 'address[]', 'uint256[]', 'uint256'], - // [1, 'bridge', [], [], 1], + // ['uint256', 'string', 'bytes32[]', 'uint256'], + // [1, 'bridge', [], 1], // ); // // const hash = keccak256(output).toString('hex'); @@ 
-421,14 +435,13 @@ mod tests { // console.log(hash); // ``` const EXPECTED: &str = - "b8da710845ad3b9e8a9dec6639a0b1a60c90441037cc0845c4b45d5aed19ec59"; + "b97454f4c266c0d223651a52a705d76f3be337ace04be4590d9aedab9818dabc"; let KeccakHash(got) = compute_hash( 1u64.into(), BRIDGE_CONTRACT_VERSION, BRIDGE_CONTRACT_NAMESPACE, vec![], - vec![], ); assert_eq!(&HEXLOWER.encode(&got[..]), EXPECTED); diff --git a/core/src/types/voting_power.rs b/core/src/types/voting_power.rs index 946e08b834a..a28eedc1a4d 100644 --- a/core/src/types/voting_power.rs +++ b/core/src/types/voting_power.rs @@ -12,6 +12,7 @@ use num_traits::ops::checked::CheckedAdd; use serde::de::Visitor; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use crate::types::token::Amount; use crate::types::uint::Uint; /// Namada voting power, normalized to the range `0 - 2^32`. @@ -29,19 +30,28 @@ use crate::types::uint::Uint; Hash, Debug, )] -pub struct EthBridgeVotingPower(u64); +pub struct EthBridgeVotingPower(u128); impl EthBridgeVotingPower { /// Maximum value that can be represented for the voting power /// stored in an Ethereum bridge smart contract. - pub const MAX: Self = Self(1 << 32); + /// + /// The smart contract uses 12-byte integers. 
+ pub const MAX: Self = Self((1 << 96) - 1); } -impl TryFrom for EthBridgeVotingPower { +impl From for EthBridgeVotingPower { + #[inline] + fn from(val: u64) -> Self { + Self(val as u128) + } +} + +impl TryFrom for EthBridgeVotingPower { type Error = (); #[inline] - fn try_from(val: u64) -> Result { + fn try_from(val: u128) -> Result { if val <= Self::MAX.0 { Ok(Self(val)) } else { @@ -51,14 +61,11 @@ impl TryFrom for EthBridgeVotingPower { } impl From<&FractionalVotingPower> for EthBridgeVotingPower { - fn from(ratio: &FractionalVotingPower) -> Self { - // normalize the voting power - // https://github.com/anoma/ethereum-bridge/blob/fe93d2e95ddb193a759811a79c8464ad4d709c12/test/utils/utilities.js#L29 - const NORMALIZED_VOTING_POWER: Uint = - Uint::from_u64(EthBridgeVotingPower::MAX.0); + fn from(FractionalVotingPower(ratio): &FractionalVotingPower) -> Self { + let max_bridge_voting_power = Uint::from(EthBridgeVotingPower::MAX.0); - let voting_power = ratio.0 * NORMALIZED_VOTING_POWER; - let voting_power = voting_power.round().to_integer().low_u64(); + let voting_power = ratio * max_bridge_voting_power; + let voting_power = voting_power.round().to_integer().low_u128(); Self(voting_power) } @@ -78,9 +85,9 @@ impl From for ethereum::U256 { } } -impl From for u64 { +impl From for u128 { #[inline] - fn from(EthBridgeVotingPower(voting_power): EthBridgeVotingPower) -> u64 { + fn from(EthBridgeVotingPower(voting_power): EthBridgeVotingPower) -> u128 { voting_power } } @@ -170,6 +177,24 @@ impl Mul<&FractionalVotingPower> for FractionalVotingPower { } } +impl Mul for FractionalVotingPower { + type Output = Amount; + + fn mul(self, rhs: Amount) -> Self::Output { + self * &rhs + } +} + +impl Mul<&Amount> for FractionalVotingPower { + type Output = Amount; + + fn mul(self, &rhs: &Amount) -> Self::Output { + let whole: Uint = rhs.into(); + let fraction = (self.0 * whole).to_integer(); + Amount::from_uint(fraction, 0u8).unwrap() + } +} + impl Add for 
FractionalVotingPower { type Output = Self; diff --git a/ethereum_bridge/src/oracle/config.rs b/ethereum_bridge/src/oracle/config.rs index 4ddb3ede1d1..b0718e81d94 100644 --- a/ethereum_bridge/src/oracle/config.rs +++ b/ethereum_bridge/src/oracle/config.rs @@ -12,8 +12,6 @@ pub struct Config { pub min_confirmations: NonZeroU64, /// The Ethereum address of the current bridge contract. pub bridge_contract: EthAddress, - /// The Ethereum address of the current governance contract. - pub governance_contract: EthAddress, /// The earliest Ethereum block from which events may be processed. pub start_block: ethereum_structs::BlockHeight, } @@ -27,7 +25,6 @@ impl std::default::Default for Config { // value that is >= 1 min_confirmations: unsafe { NonZeroU64::new_unchecked(100) }, bridge_contract: EthAddress([0; 20]), - governance_contract: EthAddress([1; 20]), start_block: 0.into(), } } diff --git a/ethereum_bridge/src/parameters.rs b/ethereum_bridge/src/parameters.rs index 6395dd6cabc..c86ef1e6ed3 100644 --- a/ethereum_bridge/src/parameters.rs +++ b/ethereum_bridge/src/parameters.rs @@ -137,8 +137,6 @@ pub struct Contracts { pub native_erc20: EthAddress, /// The Ethereum address of the bridge contract. pub bridge: UpgradeableContract, - /// The Ethereum address of the governance contract. - pub governance: UpgradeableContract, } /// Represents chain parameters for the Ethereum bridge. 
@@ -183,14 +181,12 @@ impl EthereumBridgeConfig { Contracts { native_erc20, bridge, - governance, }, } = self; let active_key = bridge_storage::active_key(); let min_confirmations_key = bridge_storage::min_confirmations_key(); let native_erc20_key = bridge_storage::native_erc20_key(); let bridge_contract_key = bridge_storage::bridge_contract_key(); - let governance_contract_key = bridge_storage::governance_contract_key(); let eth_start_height_key = bridge_storage::eth_start_height_key(); wl_storage .write_bytes( @@ -207,9 +203,6 @@ impl EthereumBridgeConfig { wl_storage .write_bytes(&bridge_contract_key, encode(bridge)) .unwrap(); - wl_storage - .write_bytes(&governance_contract_key, encode(governance)) - .unwrap(); wl_storage .write_bytes(ð_start_height_key, encode(eth_start_height)) .unwrap(); @@ -309,7 +302,6 @@ impl EthereumOracleConfig { let min_confirmations_key = bridge_storage::min_confirmations_key(); let native_erc20_key = bridge_storage::native_erc20_key(); let bridge_contract_key = bridge_storage::bridge_contract_key(); - let governance_contract_key = bridge_storage::governance_contract_key(); let eth_start_height_key = bridge_storage::eth_start_height_key(); // These reads must succeed otherwise the storage is corrupt or a @@ -318,8 +310,6 @@ impl EthereumOracleConfig { must_read_key(wl_storage, &min_confirmations_key); let native_erc20 = must_read_key(wl_storage, &native_erc20_key); let bridge_contract = must_read_key(wl_storage, &bridge_contract_key); - let governance_contract = - must_read_key(wl_storage, &governance_contract_key); let eth_start_height = must_read_key(wl_storage, ð_start_height_key); Some(Self { @@ -328,7 +318,6 @@ impl EthereumOracleConfig { contracts: Contracts { native_erc20, bridge: bridge_contract, - governance: governance_contract, }, }) } @@ -403,10 +392,6 @@ mod tests { address: EthAddress([23; 20]), version: ContractVersion::default(), }, - governance: UpgradeableContract { - address: EthAddress([18; 20]), - version: 
ContractVersion::default(), - }, }, }; let serialized = toml::to_string(&config)?; @@ -429,10 +414,6 @@ mod tests { address: EthAddress([23; 20]), version: ContractVersion::default(), }, - governance: UpgradeableContract { - address: EthAddress([18; 20]), - version: ContractVersion::default(), - }, }, }; config.init_storage(&mut wl_storage); @@ -465,10 +446,6 @@ mod tests { address: EthAddress([23; 20]), version: ContractVersion::default(), }, - governance: UpgradeableContract { - address: EthAddress([18; 20]), - version: ContractVersion::default(), - }, }, }; config.init_storage(&mut wl_storage); diff --git a/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs b/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs index 85718814536..3271efeed58 100644 --- a/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs +++ b/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs @@ -1,15 +1,14 @@ use std::collections::{HashMap, HashSet}; -use borsh::BorshSerialize; use eyre::Result; use namada_core::ledger::eth_bridge::storage::bridge_pool::get_signed_root_key; use namada_core::ledger::storage::{DBIter, StorageHasher, WlStorage, DB}; -use namada_core::ledger::storage_api::StorageWrite; +use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::types::address::Address; use namada_core::types::storage::BlockHeight; +use namada_core::types::token::Amount; use namada_core::types::transaction::TxResult; use namada_core::types::vote_extensions::bridge_pool_roots::MultiSignedVext; -use namada_core::types::voting_power::FractionalVotingPower; use namada_proof_of_stake::pos_queries::PosQueries; use crate::protocol::transactions::utils::GetVoters; @@ -48,32 +47,66 @@ where let root_height = vext.iter().next().unwrap().data.block_height; let (partial_proof, seen_by) = parse_vexts(wl_storage, vext); + // return immediately if a complete proof has already been acquired + let bp_key = 
vote_tallies::Keys::from((&partial_proof, root_height)); + let seen = + votes::storage::maybe_read_seen(wl_storage, &bp_key)?.unwrap_or(false); + if seen { + tracing::debug!( + ?root_height, + ?partial_proof, + "Bridge pool root tally is already complete" + ); + return Ok(TxResult::default()); + } + // apply updates to the bridge pool root. - let (mut changed, confirmed) = apply_update( + let (mut changed, confirmed_update) = apply_update( wl_storage, - partial_proof.clone(), + bp_key, + partial_proof, seen_by, &voting_powers, )?; // if the root is confirmed, update storage and add // relevant key to changed. - if confirmed { - let proof = votes::storage::read_body( - wl_storage, - &vote_tallies::Keys::from(&partial_proof), - )?; - wl_storage - .write_bytes( - &get_signed_root_key(), - (proof, root_height) - .try_to_vec() - .expect("Serializing a Bridge pool root shouldn't fail."), - ) + if let Some(proof) = confirmed_update { + let signed_root_key = get_signed_root_key(); + let should_write_root = wl_storage + .read::<(BridgePoolRoot, BlockHeight)>(&signed_root_key) .expect( - "Writing a signed bridge pool root to storage should not fail.", + "Reading a signed Bridge pool root from storage should not \ + fail", + ) + .map(|(_, existing_root_height)| { + // only write the newly confirmed signed root if + // it is more recent than the existing root in + // storage + existing_root_height < root_height + }) + .unwrap_or({ + // if no signed root was present in storage, write the new one + true + }); + if should_write_root { + tracing::debug!( + ?root_height, + "New Bridge pool root proof acquired" ); - changed.insert(get_signed_root_key()); + wl_storage + .write(&signed_root_key, (proof, root_height)) + .expect( + "Writing a signed Bridge pool root to storage should not \ + fail.", + ); + changed.insert(get_signed_root_key()); + } else { + tracing::debug!( + ?root_height, + "Discarding outdated Bridge pool root proof" + ); + } } Ok(TxResult { @@ -138,15 +171,15 @@ 
where /// In all instances, the changed storage keys are returned. fn apply_update( wl_storage: &mut WlStorage, + bp_key: vote_tallies::Keys, mut update: BridgePoolRoot, seen_by: Votes, - voting_powers: &HashMap<(Address, BlockHeight), FractionalVotingPower>, -) -> Result<(ChangedKeys, bool)> + voting_powers: &HashMap<(Address, BlockHeight), Amount>, +) -> Result<(ChangedKeys, Option)> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let bp_key = vote_tallies::Keys::from(&update); let partial_proof = votes::storage::read_body(wl_storage, &bp_key); let (vote_tracking, changed, confirmed, already_present) = if let Ok( partial, @@ -162,7 +195,7 @@ where let (vote_tracking, changed) = votes::update::calculate(wl_storage, &bp_key, new_votes)?; if changed.is_empty() { - return Ok((changed, false)); + return Ok((changed, None)); } let confirmed = vote_tracking.seen && changed.contains(&bp_key.seen()); (vote_tracking, changed, confirmed, true) @@ -181,13 +214,14 @@ where &vote_tracking, already_present, )?; - Ok((changed, confirmed)) + Ok((changed, confirmed.then_some(update))) } #[cfg(test)] mod test_apply_bp_roots_to_storage { use std::collections::BTreeSet; + use assert_matches::assert_matches; use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::ledger::eth_bridge::storage::bridge_pool::{ get_key_from_hash, get_nonce_key, @@ -196,17 +230,13 @@ mod test_apply_bp_roots_to_storage { use namada_core::ledger::storage_api::StorageRead; use namada_core::proto::{SignableEthMessage, Signed}; use namada_core::types::address; - use namada_core::types::dec::Dec; use namada_core::types::ethereum_events::Uint; use namada_core::types::keccak::{keccak_hash, KeccakHash}; - use namada_core::types::key::RefTo; use namada_core::types::storage::Key; - use namada_core::types::token::Amount; use namada_core::types::vote_extensions::bridge_pool_roots; + use namada_core::types::voting_power::FractionalVotingPower; use 
namada_proof_of_stake::parameters::PosParams; - use namada_proof_of_stake::{ - become_validator, bond_tokens, write_pos_params, BecomeValidator, - }; + use namada_proof_of_stake::write_pos_params; use super::*; use crate::protocol::transactions::votes::{ @@ -241,6 +271,11 @@ mod test_apply_bp_roots_to_storage { ]), ); bridge_pool_vp::init_storage(&mut wl_storage); + test_utils::commit_bridge_pool_root_at_height( + &mut wl_storage.storage, + &KeccakHash([1; 32]), + 99.into(), + ); test_utils::commit_bridge_pool_root_at_height( &mut wl_storage.storage, &KeccakHash([1; 32]), @@ -292,8 +327,9 @@ mod test_apply_bp_roots_to_storage { let TxResult { changed_keys, .. } = apply_derived_tx(&mut wl_storage, vext.into()) .expect("Test failed"); - let bp_root_key = vote_tallies::Keys::from(BridgePoolRoot( - BridgePoolRootProof::new((root, nonce)), + let bp_root_key = vote_tallies::Keys::from(( + &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), + 100.into(), )); let expected: BTreeSet = bp_root_key.into_iter().collect(); assert_eq!(expected, changed_keys); @@ -349,8 +385,9 @@ mod test_apply_bp_roots_to_storage { vexts.insert(vext); let TxResult { changed_keys, .. } = apply_derived_tx(&mut wl_storage, vexts).expect("Test failed"); - let bp_root_key = vote_tallies::Keys::from(BridgePoolRoot( - BridgePoolRootProof::new((root, nonce)), + let bp_root_key = vote_tallies::Keys::from(( + &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), + 100.into(), )); let mut expected: BTreeSet = bp_root_key.into_iter().collect(); @@ -392,8 +429,9 @@ mod test_apply_bp_roots_to_storage { let TxResult { changed_keys, .. 
} = apply_derived_tx(&mut wl_storage, vext.into()) .expect("Test failed"); - let bp_root_key = vote_tallies::Keys::from(BridgePoolRoot( - BridgePoolRootProof::new((root, nonce)), + let bp_root_key = vote_tallies::Keys::from(( + &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), + 100.into(), )); let expected: BTreeSet = [ bp_root_key.seen(), @@ -417,8 +455,9 @@ mod test_apply_bp_roots_to_storage { let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); - let bp_root_key = vote_tallies::Keys::from(BridgePoolRoot( - BridgePoolRootProof::new((root, nonce)), + let bp_root_key = vote_tallies::Keys::from(( + &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), + 100.into(), )); let hot_key = &keys[&validators[0]].eth_bridge; @@ -435,7 +474,7 @@ mod test_apply_bp_roots_to_storage { .read::(&bp_root_key.voting_power()) .expect("Test failed") .expect("Test failed") - .average_voting_power(&wl_storage); + .fractional_stake(&wl_storage); assert_eq!( voting_power, FractionalVotingPower::new_u64(5, 12).unwrap() @@ -454,7 +493,7 @@ mod test_apply_bp_roots_to_storage { .read::(&bp_root_key.voting_power()) .expect("Test failed") .expect("Test failed") - .average_voting_power(&wl_storage); + .fractional_stake(&wl_storage); assert_eq!(voting_power, FractionalVotingPower::new_u64(5, 6).unwrap()); } @@ -471,8 +510,9 @@ mod test_apply_bp_roots_to_storage { let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; - let bp_root_key = vote_tallies::Keys::from(BridgePoolRoot( - BridgePoolRootProof::new((root, nonce)), + let bp_root_key = vote_tallies::Keys::from(( + &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), + 100.into(), )); let vext = bridge_pool_roots::Vext { @@ -529,8 +569,9 @@ mod test_apply_bp_roots_to_storage { let to_sign = keccak_hash([root.0, 
nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; - let bp_root_key = vote_tallies::Keys::from(BridgePoolRoot( - BridgePoolRootProof::new((root, nonce)), + let bp_root_key = vote_tallies::Keys::from(( + &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), + 100.into(), )); let vext = bridge_pool_roots::Vext { @@ -593,7 +634,7 @@ mod test_apply_bp_roots_to_storage { let hot_key = &keys[&validators[0]].eth_bridge; let mut expected = BridgePoolRoot(BridgePoolRootProof::new((root, nonce))); - let bp_root_key = vote_tallies::Keys::from(&expected); + let bp_root_key = vote_tallies::Keys::from((&expected, 100.into())); let vext = bridge_pool_roots::Vext { validator_addr: validators[0].clone(), @@ -720,32 +761,16 @@ mod test_apply_bp_roots_to_storage { pipeline_len: 1, ..Default::default() }; - write_pos_params(&mut wl_storage, params.clone()).expect("Test failed"); + write_pos_params(&mut wl_storage, params).expect("Test failed"); // insert validators 2 and 3 at epoch 1 - for (validator, stake) in [ - (&validator_2, validator_2_stake), - (&validator_3, validator_3_stake), - ] { - let keys = test_utils::TestValidatorKeys::generate(); - let consensus_key = &keys.consensus.ref_to(); - let eth_cold_key = &keys.eth_gov.ref_to(); - let eth_hot_key = &keys.eth_bridge.ref_to(); - become_validator(BecomeValidator { - storage: &mut wl_storage, - params: ¶ms, - address: validator, - consensus_key, - eth_cold_key, - eth_hot_key, - current_epoch: 0.into(), - commission_rate: Dec::new(5, 2).unwrap(), - max_commission_rate_change: Dec::new(1, 2).unwrap(), - }) - .expect("Test failed"); - bond_tokens(&mut wl_storage, None, validator, stake, 0.into()) - .expect("Test failed"); - } + test_utils::append_validators_to_storage( + &mut wl_storage, + HashMap::from([ + (validator_2.clone(), validator_2_stake), + (validator_3.clone(), validator_3_stake), + ]), + ); // query validators to make sure they were inserted correctly macro_rules! 
query_validators { @@ -770,6 +795,12 @@ mod test_apply_bp_roots_to_storage { epoch_0_validators, HashMap::from([(validator_1.clone(), validator_1_stake)]) ); + assert_eq!( + wl_storage + .pos_queries() + .get_total_voting_power(Some(0.into())), + validator_1_stake, + ); assert_eq!( epoch_1_validators, HashMap::from([ @@ -778,6 +809,12 @@ mod test_apply_bp_roots_to_storage { (validator_3, validator_3_stake), ]) ); + assert_eq!( + wl_storage + .pos_queries() + .get_total_voting_power(Some(1.into())), + validator_1_stake + validator_2_stake + validator_3_stake, + ); // set up the bridge pool's storage bridge_pool_vp::init_storage(&mut wl_storage); @@ -817,4 +854,79 @@ mod test_apply_bp_roots_to_storage { let root_epoch_validators = query_validators(root_epoch.0); assert_eq!(epoch_0_validators, root_epoch_validators); } + + #[test] + /// Test that a signed root is not overwritten in storage + /// if a signed root is decided that had been signed at a + /// less recent block height. + fn test_more_recent_signed_root_not_overwritten() { + let TestPackage { + validators, + keys, + mut wl_storage, + } = setup(); + + let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); + let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); + + macro_rules! 
decide_at_height { + ($block_height:expr) => { + let hot_key = &keys[&validators[0]].eth_bridge; + let vext = bridge_pool_roots::Vext { + validator_addr: validators[0].clone(), + block_height: $block_height.into(), + sig: Signed::<_, SignableEthMessage>::new( + hot_key, + to_sign.clone(), + ) + .sig, + } + .sign(&keys[&validators[0]].protocol); + _ = apply_derived_tx(&mut wl_storage, vext.into()) + .expect("Test failed"); + let hot_key = &keys[&validators[1]].eth_bridge; + let vext = bridge_pool_roots::Vext { + validator_addr: validators[1].clone(), + block_height: $block_height.into(), + sig: Signed::<_, SignableEthMessage>::new( + hot_key, + to_sign.clone(), + ) + .sig, + } + .sign(&keys[&validators[1]].protocol); + _ = apply_derived_tx(&mut wl_storage, vext.into()) + .expect("Test failed"); + }; + } + + // decide bridge pool root signed at block height 100 + decide_at_height!(100); + + // check the signed root in storage + let root_in_storage = wl_storage + .read::<(BridgePoolRoot, BlockHeight)>(&get_signed_root_key()) + .expect("Test failed - storage read failed") + .expect("Test failed - no signed root in storage"); + assert_matches!( + root_in_storage, + (BridgePoolRoot(r), BlockHeight(100)) + if r.data.0 == root && r.data.1 == nonce + ); + + // decide bridge pool root signed at block height 99 + decide_at_height!(99); + + // check the signed root in storage is unchanged + let root_in_storage = wl_storage + .read::<(BridgePoolRoot, BlockHeight)>(&get_signed_root_key()) + .expect("Test failed - storage read failed") + .expect("Test failed - no signed root in storage"); + assert_matches!( + root_in_storage, + (BridgePoolRoot(r), BlockHeight(100)) + if r.data.0 == root && r.data.1 == nonce + ); + } } diff --git a/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs index 48aac0b8de4..d878a56d11c 100644 --- a/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs +++ 
b/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -45,29 +45,17 @@ where H: 'static + StorageHasher + Sync, { match event { - EthereumEvent::TransfersToNamada { - transfers, - valid_transfers_map, - nonce, - } => act_on_transfers_to_namada( - wl_storage, - TransfersToNamada { - transfers, - valid_transfers_map, - nonce, - }, - ), + EthereumEvent::TransfersToNamada { transfers, nonce } => { + act_on_transfers_to_namada( + wl_storage, + TransfersToNamada { transfers, nonce }, + ) + } EthereumEvent::TransfersToEthereum { ref transfers, ref relayer, - ref valid_transfers_map, .. - } => act_on_transfers_to_eth( - wl_storage, - transfers, - valid_transfers_map, - relayer, - ), + } => act_on_transfers_to_eth(wl_storage, transfers, relayer), _ => { tracing::debug!(?event, "No actions taken for Ethereum event"); Ok(BTreeSet::default()) @@ -93,28 +81,11 @@ where .transfers_to_namada .push_and_iter(transfer_event) .collect(); - for TransfersToNamada { - transfers, - valid_transfers_map, - .. - } in confirmed_events - { + for TransfersToNamada { transfers, .. 
} in confirmed_events { update_transfers_to_namada_state( wl_storage, &mut changed_keys, - transfers.iter().zip(valid_transfers_map.iter()).filter_map( - |(transfer, &valid)| { - if valid { - Some(transfer) - } else { - tracing::debug!( - ?transfer, - "Ignoring invalid transfer to Namada event" - ); - None - } - }, - ), + transfers.iter(), )?; } Ok(changed_keys) @@ -329,18 +300,13 @@ where fn act_on_transfers_to_eth( wl_storage: &mut WlStorage, transfers: &[TransferToEthereum], - valid_transfers: &[bool], relayer: &Address, ) -> Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - tracing::debug!( - ?transfers, - ?valid_transfers, - "Acting on transfers to Ethereum" - ); + tracing::debug!(?transfers, "Acting on transfers to Ethereum"); let mut changed_keys = BTreeSet::default(); // the BP nonce should always be incremented, even if no valid @@ -362,9 +328,7 @@ where .filter(is_pending_transfer_key) .collect(); // Remove the completed transfers from the bridge pool - for (event, is_valid) in - transfers.iter().zip(valid_transfers.iter().copied()) - { + for event in transfers { let (pending_transfer, key) = if let Some((pending, key)) = wl_storage.ethbridge_queries().lookup_transfer_to_eth(event) { @@ -373,27 +337,15 @@ where hints::cold(); unreachable!("The transfer should exist in the bridge pool"); }; - if hints::likely(is_valid) { - tracing::debug!( - ?pending_transfer, - "Valid transfer to Ethereum detected, compensating the \ - relayer and burning any Ethereum assets in Namada" - ); - changed_keys.append(&mut update_transferred_asset_balances( - wl_storage, - &pending_transfer, - )?); - } else { - tracing::debug!( - ?pending_transfer, - "Invalid transfer to Ethereum detected, compensating the \ - relayer and refunding assets in Namada" - ); - changed_keys.append(&mut refund_transferred_assets( - wl_storage, - &pending_transfer, - )?); - } + tracing::debug!( + ?pending_transfer, + "Valid transfer to Ethereum 
detected, compensating the relayer \ + and burning any Ethereum assets in Namada" + ); + changed_keys.append(&mut update_transferred_asset_balances( + wl_storage, + &pending_transfer, + )?); let pool_balance_key = balance_key(&pending_transfer.gas_fee.token, &BRIDGE_POOL_ADDRESS); let relayer_rewards_key = @@ -625,8 +577,7 @@ mod tests { use namada_core::types::address::{gen_established_address, nam, wnam}; use namada_core::types::eth_bridge_pool::GasFee; use namada_core::types::ethereum_events::testing::{ - arbitrary_eth_address, arbitrary_keccak_hash, arbitrary_nonce, - DAI_ERC20_ETH_ADDRESS, + arbitrary_keccak_hash, arbitrary_nonce, DAI_ERC20_ETH_ADDRESS, }; use namada_core::types::time::DurationSecs; use namada_core::types::token::Amount; @@ -860,21 +811,11 @@ mod tests { let mut wl_storage = TestWlStorage::default(); test_utils::bootstrap_ethereum_bridge(&mut wl_storage); let initial_stored_keys_count = stored_keys_count(&wl_storage); - let events = vec![ - EthereumEvent::NewContract { - name: "bridge".to_string(), - address: arbitrary_eth_address(), - }, - EthereumEvent::UpgradedContract { - name: "bridge".to_string(), - address: arbitrary_eth_address(), - }, - EthereumEvent::ValidatorSetUpdate { - nonce: arbitrary_nonce(), - bridge_validator_hash: arbitrary_keccak_hash(), - governance_validator_hash: arbitrary_keccak_hash(), - }, - ]; + let events = vec![EthereumEvent::ValidatorSetUpdate { + nonce: arbitrary_nonce(), + bridge_validator_hash: arbitrary_keccak_hash(), + governance_validator_hash: arbitrary_keccak_hash(), + }]; for event in events { act_on(&mut wl_storage, event.clone()).unwrap(); @@ -904,7 +845,6 @@ mod tests { }]; let event = EthereumEvent::TransfersToNamada { nonce: arbitrary_nonce(), - valid_transfers_map: transfers.iter().map(|_| true).collect(), transfers, }; @@ -1084,7 +1024,6 @@ mod tests { .collect(); let event = EthereumEvent::TransfersToEthereum { nonce: arbitrary_nonce(), - valid_transfers_map: transfers.iter().map(|_| 
true).collect(), transfers, relayer: relayer.clone(), }; @@ -1235,7 +1174,6 @@ mod tests { let event = EthereumEvent::TransfersToEthereum { nonce: arbitrary_nonce(), transfers: vec![], - valid_transfers_map: vec![], relayer: gen_implicit_address(), }; let _ = act_on(&mut wl_storage, event).unwrap(); @@ -1428,17 +1366,16 @@ mod tests { ], ); init_balance(&mut wl_storage, &pending_transfers); - let (transfers, valid_transfers_map) = pending_transfers + let transfers = pending_transfers .into_iter() .map(|ref transfer| { let transfer_to_eth: TransferToEthereum = transfer.into(); - (transfer_to_eth, true) + transfer_to_eth }) - .unzip(); + .collect(); let relayer = gen_established_address("random"); let event = EthereumEvent::TransfersToEthereum { nonce: arbitrary_nonce(), - valid_transfers_map, transfers, relayer, }; diff --git a/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs b/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs index 97fa999fb47..8b71bb26c41 100644 --- a/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs +++ b/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs @@ -12,10 +12,11 @@ use namada_core::ledger::storage::traits::StorageHasher; use namada_core::ledger::storage::{DBIter, WlStorage, DB}; use namada_core::types::address::Address; use namada_core::types::ethereum_events::EthereumEvent; +use namada_core::types::internal::ExpiredTx; use namada_core::types::storage::{BlockHeight, Epoch, Key}; +use namada_core::types::token::Amount; use namada_core::types::transaction::TxResult; use namada_core::types::vote_extensions::ethereum_events::MultiSignedEthEvent; -use namada_core::types::voting_power::FractionalVotingPower; use namada_proof_of_stake::pos_queries::PosQueries; use super::ChangedKeys; @@ -86,7 +87,7 @@ where pub(super) fn apply_updates( wl_storage: &mut WlStorage, updates: HashSet, - voting_powers: HashMap<(Address, BlockHeight), FractionalVotingPower>, + voting_powers: 
HashMap<(Address, BlockHeight), Amount>, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -133,7 +134,7 @@ where fn apply_update( wl_storage: &mut WlStorage, update: EthMsgUpdate, - voting_powers: &HashMap<(Address, BlockHeight), FractionalVotingPower>, + voting_powers: &HashMap<(Address, BlockHeight), Amount>, ) -> Result<(ChangedKeys, bool)> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -199,7 +200,22 @@ where %keys.prefix, "Ethereum event timed out", ); - votes::storage::delete(wl_storage, &keys)?; + if let Some(event) = votes::storage::delete(wl_storage, &keys)? { + tracing::debug!( + %keys.prefix, + "Queueing Ethereum event for retransmission", + ); + // NOTE: if we error out in the `ethereum_bridge` crate, + // currently there is no way to reset the expired txs queue + // to its previous state. this shouldn't be a big deal, as + // replaying ethereum events has no effect on the ledger. + // however, we may need to revisit this code if we ever + // implement slashing on double voting of ethereum events. 
+ wl_storage + .storage + .expired_txs_queue + .push(ExpiredTx::EthereumEvent(event)); + } changed.extend(keys.clone().into_iter()); } @@ -284,7 +300,8 @@ mod tests { use namada_core::types::ethereum_events::{ EthereumEvent, TransferToNamada, }; - use namada_core::types::token::{balance_key, minted_balance_key, Amount}; + use namada_core::types::token::{balance_key, minted_balance_key}; + use namada_core::types::voting_power::FractionalVotingPower; use super::*; use crate::protocol::transactions::utils::GetVoters; @@ -305,14 +322,13 @@ mod tests { #[test] /// Test applying a `TransfersToNamada` batch containing a single transfer fn test_apply_single_transfer() -> Result<()> { - let sole_validator = address::testing::gen_established_address(); + let (sole_validator, validator_stake) = test_utils::default_validator(); let receiver = address::testing::established_address_2(); let amount = arbitrary_amount(); let asset = arbitrary_eth_address(); let body = EthereumEvent::TransfersToNamada { nonce: arbitrary_nonce(), - valid_transfers_map: vec![true], transfers: vec![TransferToNamada { amount, asset, @@ -326,10 +342,9 @@ mod tests { let updates = HashSet::from_iter(vec![update]); let voting_powers = HashMap::from_iter(vec![( (sole_validator.clone(), BlockHeight(100)), - FractionalVotingPower::WHOLE, + validator_stake, )]); - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); + let (mut wl_storage, _) = test_utils::setup_default_storage(); test_utils::whitelist_tokens( &mut wl_storage, [( @@ -377,7 +392,7 @@ mod tests { let voting_power = wl_storage .read::(ð_msg_keys.voting_power())? 
.expect("Test failed") - .average_voting_power(&wl_storage); + .fractional_stake(&wl_storage); assert_eq!(voting_power, FractionalVotingPower::WHOLE); let epoch_bytes = @@ -414,7 +429,6 @@ mod tests { test_utils::setup_storage_with_validators(HashMap::from_iter( vec![(sole_validator.clone(), Amount::native_whole(100))], )); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); test_utils::whitelist_tokens( &mut wl_storage, [( @@ -429,7 +443,6 @@ mod tests { let event = EthereumEvent::TransfersToNamada { nonce: 0.into(), - valid_transfers_map: vec![true], transfers: vec![TransferToNamada { amount: Amount::from(100), asset: DAI_ERC20_ETH_ADDRESS, @@ -488,12 +501,10 @@ mod tests { (validator_b, Amount::native_whole(100)), ]), ); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); let receiver = address::testing::established_address_1(); let event = EthereumEvent::TransfersToNamada { nonce: 0.into(), - valid_transfers_map: vec![true], transfers: vec![TransferToNamada { amount: Amount::from(100), asset: DAI_ERC20_ETH_ADDRESS, @@ -545,7 +556,6 @@ mod tests { let event = EthereumEvent::TransfersToNamada { nonce: 0.into(), - valid_transfers_map: vec![true], transfers: vec![TransferToNamada { amount: Amount::from(100), asset: DAI_ERC20_ETH_ADDRESS, @@ -590,7 +600,7 @@ mod tests { let voting_power = wl_storage .read::(ð_msg_keys.voting_power())? 
.expect("Test failed") - .average_voting_power(&wl_storage); + .fractional_stake(&wl_storage); assert_eq!(voting_power, FractionalVotingPower::HALF); Ok(()) @@ -664,12 +674,10 @@ mod tests { (validator_b, Amount::native_whole(100)), ]), ); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); let receiver = address::testing::established_address_1(); let event = EthereumEvent::TransfersToNamada { nonce: 0.into(), - valid_transfers_map: vec![true], transfers: vec![TransferToNamada { amount: Amount::from(100), asset: DAI_ERC20_ETH_ADDRESS, @@ -700,7 +708,6 @@ mod tests { let new_event = EthereumEvent::TransfersToNamada { nonce: 1.into(), - valid_transfers_map: vec![true], transfers: vec![TransferToNamada { amount: Amount::from(100), asset: DAI_ERC20_ETH_ADDRESS, @@ -793,12 +800,10 @@ mod tests { (validator_b.clone(), Amount::native_whole(100)), ]), ); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); let receiver = address::testing::established_address_1(); let event = EthereumEvent::TransfersToNamada { nonce: 0.into(), - valid_transfers_map: vec![true], transfers: vec![TransferToNamada { amount: Amount::from(100), asset: DAI_ERC20_ETH_ADDRESS, @@ -821,7 +826,7 @@ mod tests { (KeyKind::VotingPower, Some(power)) => { let power = EpochedVotingPower::try_from_slice(&power) .expect("Test failed") - .average_voting_power(&wl_storage); + .fractional_stake(&wl_storage); assert_eq!(power, FractionalVotingPower::HALF); } (_, Some(_)) => {} @@ -851,7 +856,7 @@ mod tests { (KeyKind::VotingPower, Some(power)) => { let power = EpochedVotingPower::try_from_slice(&power) .expect("Test failed") - .average_voting_power(&wl_storage); + .fractional_stake(&wl_storage); assert_eq!(power, FractionalVotingPower::HALF); } (_, Some(_)) => {} diff --git a/ethereum_bridge/src/protocol/transactions/utils.rs b/ethereum_bridge/src/protocol/transactions/utils.rs index 1f06c46a302..d2f44c995e9 100644 --- a/ethereum_bridge/src/protocol/transactions/utils.rs +++ 
b/ethereum_bridge/src/protocol/transactions/utils.rs @@ -6,7 +6,6 @@ use namada_core::ledger::storage::{DBIter, StorageHasher, WlStorage, DB}; use namada_core::types::address::Address; use namada_core::types::storage::BlockHeight; use namada_core::types::token; -use namada_core::types::voting_power::FractionalVotingPower; use namada_proof_of_stake::pos_queries::PosQueries; use namada_proof_of_stake::types::WeightedValidator; @@ -25,7 +24,7 @@ pub(super) trait GetVoters { pub(super) fn get_voting_powers( wl_storage: &WlStorage, proof: P, -) -> eyre::Result> +) -> eyre::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -85,15 +84,13 @@ where pub(super) fn get_voting_powers_for_selected( all_consensus: &BTreeMap>, selected: HashSet<(Address, BlockHeight)>, -) -> eyre::Result> { - let total_voting_powers = - sum_voting_powers_for_block_heights(all_consensus); +) -> eyre::Result> { let voting_powers = selected .into_iter() .map( |(addr, height)| -> eyre::Result<( (Address, BlockHeight), - FractionalVotingPower, + token::Amount, )> { let consensus_validators = all_consensus.get(&height).ok_or_else(|| { @@ -101,7 +98,7 @@ pub(super) fn get_voting_powers_for_selected( "No consensus validators found for height {height}" ) })?; - let individual_voting_power = consensus_validators + let voting_power = consensus_validators .iter() .find(|&v| v.address == addr) .ok_or_else(|| { @@ -111,21 +108,9 @@ pub(super) fn get_voting_powers_for_selected( ) })? .bonded_stake; - let total_voting_power = total_voting_powers - .get(&height) - .ok_or_else(|| { - eyre!( - "No total voting power provided for height \ - {height}" - ) - })? 
- .to_owned(); Ok(( (addr, height), - FractionalVotingPower::new( - individual_voting_power.into(), - total_voting_power.into(), - )?, + voting_power, )) }, ) @@ -133,24 +118,6 @@ pub(super) fn get_voting_powers_for_selected( Ok(voting_powers) } -pub(super) fn sum_voting_powers_for_block_heights( - validators: &BTreeMap>, -) -> BTreeMap { - validators - .iter() - .map(|(h, vs)| (h.to_owned(), sum_voting_powers(vs))) - .collect() -} - -pub(super) fn sum_voting_powers( - validators: &BTreeSet, -) -> token::Amount { - validators - .iter() - .map(|validator| validator.bonded_stake) - .sum::() -} - #[cfg(test)] mod tests { use std::collections::HashSet; @@ -158,6 +125,7 @@ mod tests { use assert_matches::assert_matches; use namada_core::types::address; use namada_core::types::ethereum_events::testing::arbitrary_bonded_stake; + use namada_core::types::voting_power::FractionalVotingPower; use super::*; @@ -190,7 +158,7 @@ mod tests { assert_eq!(voting_powers.len(), 1); assert_matches!( voting_powers.get(&(sole_validator, BlockHeight(100))), - Some(v) if *v == FractionalVotingPower::WHOLE + Some(v) if *v == bonded_stake ); } @@ -263,6 +231,7 @@ mod tests { weighted_validator_2, ]), )]); + let bonded_stake = bonded_stake_1 + bonded_stake_2; let result = get_voting_powers_for_selected(&consensus_validators, validators); @@ -272,56 +241,17 @@ mod tests { Err(error) => panic!("error: {:?}", error), }; assert_eq!(voting_powers.len(), 2); + let expected_stake = + FractionalVotingPower::new_u64(100, 300).unwrap() * bonded_stake; assert_matches!( voting_powers.get(&(validator_1, BlockHeight(100))), - Some(v) if *v == FractionalVotingPower::new_u64(100, 300).unwrap() + Some(v) if *v == expected_stake ); + let expected_stake = + FractionalVotingPower::new_u64(200, 300).unwrap() * bonded_stake; assert_matches!( voting_powers.get(&(validator_2, BlockHeight(100))), - Some(v) if *v == FractionalVotingPower::new_u64(200, 300).unwrap() + Some(v) if *v == expected_stake ); } - - #[test] - 
/// Test summing the voting powers for a set of validators containing only - /// one validator - fn test_sum_voting_powers_sole_validator() { - let sole_validator = address::testing::established_address_1(); - let bonded_stake = arbitrary_bonded_stake(); - let weighted_sole_validator = WeightedValidator { - bonded_stake, - address: sole_validator, - }; - let validators = BTreeSet::from_iter(vec![weighted_sole_validator]); - - let total = sum_voting_powers(&validators); - - assert_eq!(total, bonded_stake); - } - - #[test] - /// Test summing the voting powers for a set of validators containing two - /// validators - fn test_sum_voting_powers_two_validators() { - let validator_1 = address::testing::established_address_1(); - let validator_2 = address::testing::established_address_2(); - let bonded_stake_1 = token::Amount::from(100); - let bonded_stake_2 = token::Amount::from(200); - let weighted_validator_1 = WeightedValidator { - bonded_stake: bonded_stake_1, - address: validator_1, - }; - let weighted_validator_2 = WeightedValidator { - bonded_stake: bonded_stake_2, - address: validator_2, - }; - let validators = BTreeSet::from_iter(vec![ - weighted_validator_1, - weighted_validator_2, - ]); - - let total = sum_voting_powers(&validators); - - assert_eq!(total, token::Amount::from(300)); - } } diff --git a/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs b/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs index 63b6627d9c8..8457b82c4fb 100644 --- a/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs +++ b/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs @@ -6,11 +6,11 @@ use eyre::Result; use namada_core::ledger::storage::{DBIter, StorageHasher, WlStorage, DB}; use namada_core::types::address::Address; use namada_core::types::storage::{BlockHeight, Epoch}; +use namada_core::types::token::Amount; #[allow(unused_imports)] use namada_core::types::transaction::protocol::ProtocolTxType; use 
namada_core::types::transaction::TxResult; use namada_core::types::vote_extensions::validator_set_update; -use namada_core::types::voting_power::FractionalVotingPower; use super::ChangedKeys; use crate::protocol::transactions::utils; @@ -85,7 +85,7 @@ fn apply_update( ext: validator_set_update::VextDigest, signing_epoch: Epoch, epoch_2nd_height: BlockHeight, - voting_powers: HashMap<(Address, BlockHeight), FractionalVotingPower>, + voting_powers: HashMap<(Address, BlockHeight), Amount>, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -199,7 +199,6 @@ where #[cfg(test)] mod test_valset_upd_state_changes { use namada_core::types::address; - use namada_core::types::token::Amount; use namada_core::types::vote_extensions::validator_set_update::VotingPowersMap; use namada_core::types::voting_power::FractionalVotingPower; use namada_proof_of_stake::pos_queries::PosQueries; diff --git a/ethereum_bridge/src/protocol/transactions/votes.rs b/ethereum_bridge/src/protocol/transactions/votes.rs index 5cf8fa4e3dd..c3a82bd370e 100644 --- a/ethereum_bridge/src/protocol/transactions/votes.rs +++ b/ethereum_bridge/src/protocol/transactions/votes.rs @@ -5,7 +5,6 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use eyre::{eyre, Result}; -use namada_core::hints; use namada_core::ledger::storage::{DBIter, StorageHasher, WlStorage, DB}; use namada_core::types::address::Address; use namada_core::types::storage::{BlockHeight, Epoch}; @@ -27,31 +26,48 @@ pub(super) mod update; pub type Votes = BTreeMap; /// The voting power behind a tally aggregated over multiple epochs. -pub type EpochedVotingPower = BTreeMap; +pub type EpochedVotingPower = BTreeMap; /// Extension methods for [`EpochedVotingPower`] instances. pub trait EpochedVotingPowerExt { - /// Get the total voting power staked across all epochs - /// in this [`EpochedVotingPower`]. 
- fn get_epoch_voting_powers( + /// Query the stake of the most secure [`Epoch`] referenced by an + /// [`EpochedVotingPower`]. This translates to the [`Epoch`] with + /// the most staked tokens. + fn epoch_max_voting_power( &self, wl_storage: &WlStorage, - ) -> HashMap + ) -> Option where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync; - /// Get the weighted average of some tally's voting powers pertaining to all - /// epochs it was held in. - fn average_voting_power( + /// Fetch the sum of the stake tallied on an + /// [`EpochedVotingPower`]. + fn tallied_stake(&self) -> token::Amount; + + /// Fetch the sum of the stake tallied on an + /// [`EpochedVotingPower`], as a fraction over + /// the maximum stake seen in the epochs voted on. + #[inline] + fn fractional_stake( &self, wl_storage: &WlStorage, ) -> FractionalVotingPower where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync; + H: 'static + StorageHasher + Sync, + { + let Some(max_voting_power) = self.epoch_max_voting_power(wl_storage) else { + return FractionalVotingPower::NULL; + }; + FractionalVotingPower::new( + self.tallied_stake().into(), + max_voting_power.into(), + ) + .unwrap() + } - /// Check if the [`Tally`] associated with this [`EpochedVotingPower`] + /// Check if the [`Tally`] associated with an [`EpochedVotingPower`] /// can be considered `seen`. #[inline] fn has_majority_quorum(&self, wl_storage: &WlStorage) -> bool @@ -59,16 +75,29 @@ pub trait EpochedVotingPowerExt { D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - self.average_voting_power(wl_storage) - > FractionalVotingPower::TWO_THIRDS + let Some(max_voting_power) = self.epoch_max_voting_power(wl_storage) else { + return false; + }; + // NB: Preserve the safety property of the Tendermint protocol across + // all the epochs we vote on. 
+ // + // PROOF: We calculate the maximum amount of tokens S_max staked on + // one of the epochs the tally occurred in. At most F = 1/3 * S_max + // of the combined stake can be Byzantine, for the protocol to uphold + // its linearizability property whilst remaining "secure" against + // arbitrarily faulty nodes. Therefore, we can consider a tally secure + // if has accumulated an amount of stake greater than the threshold + // stake of S_max - F = 2/3 S_max. + let threshold = FractionalVotingPower::TWO_THIRDS * max_voting_power; + self.tallied_stake() > threshold } } impl EpochedVotingPowerExt for EpochedVotingPower { - fn get_epoch_voting_powers( + fn epoch_max_voting_power( &self, wl_storage: &WlStorage, - ) -> HashMap + ) -> Option where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -76,57 +105,13 @@ impl EpochedVotingPowerExt for EpochedVotingPower { self.keys() .copied() .map(|epoch| { - ( - epoch, - wl_storage - .pos_queries() - .get_total_voting_power(Some(epoch)), - ) + wl_storage.pos_queries().get_total_voting_power(Some(epoch)) }) - .collect() + .max() } - fn average_voting_power( - &self, - wl_storage: &WlStorage, - ) -> FractionalVotingPower - where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, - { - // if we only voted across a single epoch, we can avoid doing - // expensive I/O operations - if hints::likely(self.len() == 1) { - // TODO: switch to [`BTreeMap::first_entry`] when we start - // using Rust >= 1.66 - let Some(&power) = self.values().next() else { - hints::cold(); - unreachable!("The map has one value"); - }; - return power; - } - - let epoch_voting_powers = self.get_epoch_voting_powers(wl_storage); - let total_voting_power = epoch_voting_powers - .values() - .fold(token::Amount::from(0u64), |accum, &stake| accum + stake); - - self.iter().map(|(&epoch, &power)| (epoch, power)).fold( - FractionalVotingPower::NULL, - |average, (epoch, 
aggregated_voting_power)| { - let epoch_voting_power = epoch_voting_powers - .get(&epoch) - .copied() - .expect("This value should be in the map"); - debug_assert!(epoch_voting_power > 0.into()); - let weight = FractionalVotingPower::new( - epoch_voting_power.into(), - total_voting_power.into(), - ) - .unwrap(); - average + weight * aggregated_voting_power - }, - ) + fn tallied_stake(&self) -> token::Amount { + self.values().copied().sum::() } } @@ -153,7 +138,7 @@ pub struct Tally { pub fn calculate_new( wl_storage: &WlStorage, seen_by: Votes, - voting_powers: &HashMap<(Address, BlockHeight), FractionalVotingPower>, + voting_powers: &HashMap<(Address, BlockHeight), token::Amount>, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -164,14 +149,14 @@ where match voting_powers .get(&(validator.to_owned(), block_height.to_owned())) { - Some(voting_power) => { + Some(&voting_power) => { let epoch = wl_storage .pos_queries() .get_epoch(*block_height) .expect("The queried epoch should be known"); let aggregated = seen_by_voting_power .entry(epoch) - .or_insert(FractionalVotingPower::NULL); + .or_insert_with(token::Amount::zero); *aggregated += voting_power; } None => { @@ -202,15 +187,10 @@ pub fn dedupe(signers: BTreeSet<(Address, BlockHeight)>) -> Votes { mod tests { use std::collections::BTreeSet; - use namada_core::ledger::storage::testing::TestWlStorage; - use namada_core::types::dec::Dec; - use namada_core::types::key::RefTo; use namada_core::types::storage::BlockHeight; use namada_core::types::{address, token}; use namada_proof_of_stake::parameters::PosParams; - use namada_proof_of_stake::{ - become_validator, bond_tokens, write_pos_params, BecomeValidator, - }; + use namada_proof_of_stake::write_pos_params; use super::*; use crate::test_utils; @@ -305,18 +285,21 @@ mod tests { /// fast path of the algorithm. 
#[test] fn test_tally_vote_single_epoch() { - let dummy_storage = TestWlStorage::default(); + let (_, dummy_validator_stake) = test_utils::default_validator(); + let (dummy_storage, _) = test_utils::setup_default_storage(); - let aggregated = - EpochedVotingPower::from([(0.into(), FractionalVotingPower::HALF)]); + let aggregated = EpochedVotingPower::from([( + 0.into(), + FractionalVotingPower::HALF * dummy_validator_stake, + )]); assert_eq!( - aggregated.average_voting_power(&dummy_storage), + aggregated.fractional_stake(&dummy_storage), FractionalVotingPower::HALF ); } /// Test that voting on a tally across epoch boundaries accounts - /// for the average voting power attained along those epochs. + /// for the maximum voting power attained along those epochs. #[test] fn test_voting_across_epoch_boundaries() { // the validators that will vote in the tally @@ -329,6 +312,9 @@ mod tests { let validator_3 = address::testing::established_address_3(); let validator_3_stake = token::Amount::native_whole(100); + let total_stake = + validator_1_stake + validator_2_stake + validator_3_stake; + // start epoch 0 with validator 1 let (mut wl_storage, _) = test_utils::setup_storage_with_validators( HashMap::from([(validator_1.clone(), validator_1_stake)]), @@ -339,32 +325,16 @@ mod tests { pipeline_len: 1, ..Default::default() }; - write_pos_params(&mut wl_storage, params.clone()).expect("Test failed"); + write_pos_params(&mut wl_storage, params).expect("Test failed"); // insert validators 2 and 3 at epoch 1 - for (validator, stake) in [ - (&validator_2, validator_2_stake), - (&validator_3, validator_3_stake), - ] { - let keys = test_utils::TestValidatorKeys::generate(); - let consensus_key = &keys.consensus.ref_to(); - let eth_cold_key = &keys.eth_gov.ref_to(); - let eth_hot_key = &keys.eth_bridge.ref_to(); - become_validator(BecomeValidator { - storage: &mut wl_storage, - params: ¶ms, - address: validator, - consensus_key, - eth_cold_key, - eth_hot_key, - current_epoch: 
0.into(), - commission_rate: Dec::new(5, 2).unwrap(), - max_commission_rate_change: Dec::new(1, 2).unwrap(), - }) - .expect("Test failed"); - bond_tokens(&mut wl_storage, None, validator, stake, 0.into()) - .expect("Test failed"); - } + test_utils::append_validators_to_storage( + &mut wl_storage, + HashMap::from([ + (validator_2.clone(), validator_2_stake), + (validator_3.clone(), validator_3_stake), + ]), + ); // query validators to make sure they were inserted correctly let query_validators = |epoch: u64| { @@ -381,6 +351,12 @@ mod tests { epoch_0_validators, HashMap::from([(validator_1.clone(), validator_1_stake)]) ); + assert_eq!( + wl_storage + .pos_queries() + .get_total_voting_power(Some(0.into())), + validator_1_stake, + ); assert_eq!( epoch_1_validators, HashMap::from([ @@ -389,15 +365,21 @@ mod tests { (validator_3, validator_3_stake), ]) ); + assert_eq!( + wl_storage + .pos_queries() + .get_total_voting_power(Some(1.into())), + total_stake, + ); // check that voting works as expected let aggregated = EpochedVotingPower::from([ - (0.into(), FractionalVotingPower::ONE_THIRD), - (1.into(), FractionalVotingPower::ONE_THIRD), + (0.into(), FractionalVotingPower::ONE_THIRD * total_stake), + (1.into(), FractionalVotingPower::ONE_THIRD * total_stake), ]); assert_eq!( - aggregated.average_voting_power(&wl_storage), - FractionalVotingPower::ONE_THIRD + aggregated.fractional_stake(&wl_storage), + FractionalVotingPower::TWO_THIRDS ); } } diff --git a/ethereum_bridge/src/protocol/transactions/votes/storage.rs b/ethereum_bridge/src/protocol/transactions/votes/storage.rs index 4f6d107bb20..832797ae1b1 100644 --- a/ethereum_bridge/src/protocol/transactions/votes/storage.rs +++ b/ethereum_bridge/src/protocol/transactions/votes/storage.rs @@ -1,12 +1,14 @@ use borsh::{BorshDeserialize, BorshSerialize}; use eyre::{Result, WrapErr}; +use namada_core::hints; use namada_core::ledger::storage::{ DBIter, PrefixIter, StorageHasher, WlStorage, DB, }; use 
namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::types::storage::Key; +use namada_core::types::voting_power::FractionalVotingPower; -use super::{EpochedVotingPower, Tally, Votes}; +use super::{EpochedVotingPower, EpochedVotingPowerExt, Tally, Votes}; use crate::storage::vote_tallies; pub fn write( @@ -36,21 +38,39 @@ where Ok(()) } +/// Delete a tally from storage, and return the associated value of +/// type `T` being voted on, in case it has accumulated more than 1/3 +/// of fractional voting power behind it. +#[must_use = "The storage value returned by this function must be used"] pub fn delete( wl_storage: &mut WlStorage, keys: &vote_tallies::Keys, -) -> Result<()> +) -> Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, - T: BorshSerialize, + T: BorshDeserialize, { + let opt_body = { + let voting_power: EpochedVotingPower = + super::read::value(wl_storage, &keys.voting_power())?; + + if hints::unlikely( + voting_power.fractional_stake(wl_storage) + > FractionalVotingPower::ONE_THIRD, + ) { + let body: T = super::read::value(wl_storage, &keys.body())?; + Some(body) + } else { + None + } + }; wl_storage.delete(&keys.body())?; wl_storage.delete(&keys.seen())?; wl_storage.delete(&keys.seen_by())?; wl_storage.delete(&keys.voting_power())?; wl_storage.delete(&keys.voting_started_epoch())?; - Ok(()) + Ok(opt_body) } pub fn read( @@ -116,31 +136,68 @@ where mod tests { use std::collections::BTreeMap; - use namada_core::ledger::storage::testing::TestWlStorage; - use namada_core::types::address; + use assert_matches::assert_matches; use namada_core::types::ethereum_events::EthereumEvent; - use namada_core::types::voting_power::FractionalVotingPower; use super::*; + use crate::test_utils; + + #[test] + fn test_delete_expired_tally() { + let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (validator, validator_voting_power) = + test_utils::default_validator(); + + let event = 
EthereumEvent::TransfersToNamada { + nonce: 0.into(), + transfers: vec![], + }; + let keys = vote_tallies::Keys::from(&event); + + // write some random ethereum event's tally to storage + // with >1/3 voting power behind it + let mut tally = Tally { + voting_power: EpochedVotingPower::from([( + 0.into(), + // store only half of the available voting power, + // which is >1/3 but <=2/3 + FractionalVotingPower::HALF * validator_voting_power, + )]), + seen_by: BTreeMap::from([(validator, 1.into())]), + seen: false, + }; + assert!(write(&mut wl_storage, &keys, &event, &tally, false).is_ok()); + + // delete the tally and check that the body is returned + let opt_body = delete(&mut wl_storage, &keys).unwrap(); + assert_matches!(opt_body, Some(e) if e == event); + + // now, we write another tally, with <=1/3 voting power + tally.voting_power = + EpochedVotingPower::from([(0.into(), 1u64.into())]); + assert!(write(&mut wl_storage, &keys, &event, &tally, false).is_ok()); + + // delete the tally and check that no body is returned + let opt_body = delete(&mut wl_storage, &keys).unwrap(); + assert_matches!(opt_body, None); + } #[test] fn test_write_tally() { - let mut wl_storage = TestWlStorage::default(); + let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (validator, validator_voting_power) = + test_utils::default_validator(); let event = EthereumEvent::TransfersToNamada { nonce: 0.into(), transfers: vec![], - valid_transfers_map: vec![], }; let keys = vote_tallies::Keys::from(&event); let tally = Tally { voting_power: EpochedVotingPower::from([( 0.into(), - FractionalVotingPower::ONE_THIRD, - )]), - seen_by: BTreeMap::from([( - address::testing::established_address_1(), - 10.into(), + validator_voting_power, )]), + seen_by: BTreeMap::from([(validator, 10.into())]), seen: false, }; @@ -175,22 +232,20 @@ mod tests { #[test] fn test_read_tally() { - let mut wl_storage = TestWlStorage::default(); + let (mut wl_storage, _) = test_utils::setup_default_storage(); 
+ let (validator, validator_voting_power) = + test_utils::default_validator(); let event = EthereumEvent::TransfersToNamada { nonce: 0.into(), transfers: vec![], - valid_transfers_map: vec![], }; let keys = vote_tallies::Keys::from(&event); let tally = Tally { voting_power: EpochedVotingPower::from([( 0.into(), - FractionalVotingPower::ONE_THIRD, - )]), - seen_by: BTreeMap::from([( - address::testing::established_address_1(), - 10.into(), + validator_voting_power, )]), + seen_by: BTreeMap::from([(validator, 10.into())]), seen: false, }; wl_storage diff --git a/ethereum_bridge/src/protocol/transactions/votes/update.rs b/ethereum_bridge/src/protocol/transactions/votes/update.rs index 369290ef6b5..c1173bdf128 100644 --- a/ethereum_bridge/src/protocol/transactions/votes/update.rs +++ b/ethereum_bridge/src/protocol/transactions/votes/update.rs @@ -5,7 +5,7 @@ use eyre::{eyre, Result}; use namada_core::ledger::storage::{DBIter, StorageHasher, WlStorage, DB}; use namada_core::types::address::Address; use namada_core::types::storage::BlockHeight; -use namada_core::types::voting_power::FractionalVotingPower; +use namada_core::types::token; use namada_proof_of_stake::pos_queries::PosQueries; use super::{ChangedKeys, EpochedVotingPowerExt, Tally, Votes}; @@ -14,34 +14,33 @@ use crate::storage::vote_tallies; /// Wraps all the information about new votes to be applied to some existing /// tally in storage. pub(in super::super) struct NewVotes { - inner: HashMap, + inner: HashMap, } impl NewVotes { /// Constructs a new [`NewVotes`]. /// - /// For all `votes` provided, a corresponding [`FractionalVotingPower`] must + /// For all `votes` provided, a corresponding [`token::Amount`] must /// be provided in `voting_powers` also, otherwise an error will be /// returned. 
pub fn new( votes: Votes, - voting_powers: &HashMap<(Address, BlockHeight), FractionalVotingPower>, + voting_powers: &HashMap<(Address, BlockHeight), token::Amount>, ) -> Result { let mut inner = HashMap::default(); for vote in votes { - let fract_voting_power = match voting_powers.get(&vote) { - Some(fract_voting_power) => fract_voting_power, + let voting_power = match voting_powers.get(&vote) { + Some(voting_power) => voting_power, None => { let (address, block_height) = vote; return Err(eyre!( - "No fractional voting power provided for vote by \ - validator {address} at block height {block_height}" + "No voting power provided for vote by validator \ + {address} at block height {block_height}" )); } }; let (address, block_height) = vote; - _ = inner - .insert(address, (block_height, fract_voting_power.to_owned())); + _ = inner.insert(address, (block_height, voting_power.to_owned())); } Ok(Self { inner }) } @@ -71,14 +70,14 @@ impl NewVotes { impl IntoIterator for NewVotes { type IntoIter = std::collections::hash_set::IntoIter; - type Item = (Address, BlockHeight, FractionalVotingPower); + type Item = (Address, BlockHeight, token::Amount); fn into_iter(self) -> Self::IntoIter { let items: HashSet<_> = self .inner .into_iter() - .map(|(address, (block_height, fract_voting_power))| { - (address, block_height, fract_voting_power) + .map(|(address, (block_height, stake))| { + (address, block_height, stake) }) .collect(); items.into_iter() @@ -174,7 +173,7 @@ where .expect("The queried epoch should be known"); let aggregated = voting_power_post .entry(epoch) - .or_insert(FractionalVotingPower::NULL); + .or_insert_with(token::Amount::zero); *aggregated += voting_power; } @@ -213,42 +212,80 @@ mod tests { use namada_core::ledger::storage::testing::TestWlStorage; use namada_core::types::address; use namada_core::types::ethereum_events::EthereumEvent; + use namada_core::types::voting_power::FractionalVotingPower; + use self::helpers::{default_event, default_total_stake, 
TallyParams}; use super::*; - use crate::protocol::transactions::votes::update::tests::helpers::{ - arbitrary_event, setup_tally, - }; use crate::protocol::transactions::votes::{self, EpochedVotingPower}; + use crate::test_utils; mod helpers { + use namada_proof_of_stake::total_consensus_stake_key_handle; + use super::*; + /// Default amount of staked NAM to be used in tests. + pub(super) fn default_total_stake() -> token::Amount { + // 1000 NAM + token::Amount::native_whole(1_000) + } + /// Returns an arbitrary piece of data that can have votes tallied /// against it. - pub(super) fn arbitrary_event() -> EthereumEvent { + pub(super) fn default_event() -> EthereumEvent { EthereumEvent::TransfersToNamada { nonce: 0.into(), transfers: vec![], - valid_transfers_map: vec![], } } - /// Writes an initial [`Tally`] to storage, based on the passed `votes`. - pub(super) fn setup_tally( - wl_storage: &mut TestWlStorage, - event: &EthereumEvent, - keys: &vote_tallies::Keys, - votes: HashSet<(Address, BlockHeight, FractionalVotingPower)>, - ) -> Result { - let voting_power: FractionalVotingPower = - votes.iter().cloned().map(|(_, _, v)| v).sum(); - let tally = Tally { - voting_power: get_epoched_voting_power(voting_power.to_owned()), - seen_by: votes.into_iter().map(|(a, h, _)| (a, h)).collect(), - seen: voting_power > FractionalVotingPower::TWO_THIRDS, - }; - votes::storage::write(wl_storage, keys, event, &tally, false)?; - Ok(tally) + /// Parameters to construct a test [`Tally`]. + pub(super) struct TallyParams<'a> { + /// Handle to storage. + pub wl_storage: &'a mut TestWlStorage, + /// The event to be voted on. + pub event: &'a EthereumEvent, + /// Votes from the given validators at the given block height. + /// + /// The voting power of each validator is expressed as a fraction + /// of the provided `total_stake` parameter. + pub votes: HashSet<(Address, BlockHeight, token::Amount)>, + /// The [`token::Amount`] staked at epoch 0. 
+ pub total_stake: token::Amount, + } + + impl TallyParams<'_> { + /// Write an initial [`Tally`] to storage. + pub(super) fn setup(self) -> Result { + let Self { + wl_storage, + event, + votes, + total_stake, + } = self; + let keys = vote_tallies::Keys::from(event); + let seen_voting_power: token::Amount = votes + .iter() + .map(|(_, _, voting_power)| *voting_power) + .sum(); + let tally = Tally { + voting_power: get_epoched_voting_power(seen_voting_power), + seen_by: votes + .into_iter() + .map(|(addr, height, _)| (addr, height)) + .collect(), + seen: seen_voting_power + > FractionalVotingPower::TWO_THIRDS * total_stake, + }; + votes::storage::write(wl_storage, &keys, event, &tally, false)?; + total_consensus_stake_key_handle().set( + wl_storage, + total_stake, + 0u64.into(), + 0, + )?; + Ok(tally) + } } } @@ -267,7 +304,8 @@ mod tests { fn test_vote_info_new_single_voter() -> Result<()> { let validator = address::testing::established_address_1(); let vote_height = BlockHeight(100); - let voting_power = FractionalVotingPower::ONE_THIRD; + let voting_power = + FractionalVotingPower::ONE_THIRD * default_total_stake(); let vote = (validator.clone(), vote_height); let votes = Votes::from([vote.clone()]); let voting_powers = HashMap::from([(vote, voting_power)]); @@ -278,7 +316,7 @@ mod tests { let votes: BTreeSet<_> = vote_info.into_iter().collect(); assert_eq!( votes, - BTreeSet::from([(validator, vote_height, voting_power,)]), + BTreeSet::from([(validator, vote_height, voting_power)]), ); Ok(()) } @@ -301,7 +339,8 @@ mod tests { fn test_vote_info_without_voters() -> Result<()> { let validator = address::testing::established_address_1(); let vote_height = BlockHeight(100); - let voting_power = FractionalVotingPower::ONE_THIRD; + let voting_power = + FractionalVotingPower::ONE_THIRD * default_total_stake(); let vote = (validator.clone(), vote_height); let votes = Votes::from([vote.clone()]); let voting_powers = HashMap::from([(vote, voting_power)]); @@ -319,7 +358,8 
@@ mod tests { let validator = address::testing::established_address_1(); let new_validator = address::testing::established_address_2(); let vote_height = BlockHeight(100); - let voting_power = FractionalVotingPower::ONE_THIRD; + let voting_power = + FractionalVotingPower::ONE_THIRD * default_total_stake(); let vote = (validator.clone(), vote_height); let votes = Votes::from([vote.clone()]); let voting_powers = HashMap::from([(vote, voting_power)]); @@ -340,23 +380,23 @@ mod tests { let validator = address::testing::established_address_1(); let already_voted_height = BlockHeight(100); - let event = arbitrary_event(); - let keys = vote_tallies::Keys::from(&event); - let tally_pre = setup_tally( - &mut wl_storage, - &event, - &keys, - HashSet::from([( + let event = default_event(); + let tally_pre = TallyParams { + total_stake: default_total_stake(), + wl_storage: &mut wl_storage, + event: &event, + votes: HashSet::from([( validator.clone(), already_voted_height, - FractionalVotingPower::ONE_THIRD, + FractionalVotingPower::ONE_THIRD * default_total_stake(), )]), - )?; + } + .setup()?; let votes = Votes::from([(validator.clone(), BlockHeight(1000))]); let voting_powers = HashMap::from([( (validator, BlockHeight(1000)), - FractionalVotingPower::ONE_THIRD, + FractionalVotingPower::ONE_THIRD * default_total_stake(), )]); let vote_info = NewVotes::new(votes, &voting_powers)?; @@ -371,22 +411,25 @@ mod tests { #[test] fn test_calculate_already_seen() -> Result<()> { let mut wl_storage = TestWlStorage::default(); - let event = arbitrary_event(); + let event = default_event(); let keys = vote_tallies::Keys::from(&event); - let tally_pre = setup_tally( - &mut wl_storage, - &event, - &keys, - HashSet::from([( + let tally_pre = TallyParams { + total_stake: default_total_stake(), + wl_storage: &mut wl_storage, + event: &event, + votes: HashSet::from([( address::testing::established_address_1(), BlockHeight(10), - FractionalVotingPower::new_u64(3, 4)?, // this is > 2/3 + // this 
is > 2/3 + FractionalVotingPower::new_u64(3, 4)? * default_total_stake(), )]), - )?; + } + .setup()?; let validator = address::testing::established_address_2(); let vote_height = BlockHeight(100); - let voting_power = FractionalVotingPower::ONE_THIRD; + let voting_power = + FractionalVotingPower::new_u64(1, 4)? * default_total_stake(); let vote = (validator, vote_height); let votes = Votes::from([vote.clone()]); let voting_powers = HashMap::from([(vote, voting_power)]); @@ -403,19 +446,20 @@ mod tests { /// Tests that an unchanged tally is returned if no votes are passed. #[test] fn test_calculate_empty() -> Result<()> { - let mut wl_storage = TestWlStorage::default(); - let event = arbitrary_event(); + let (mut wl_storage, _) = test_utils::setup_default_storage(); + let event = default_event(); let keys = vote_tallies::Keys::from(&event); - let tally_pre = setup_tally( - &mut wl_storage, - &event, - &keys, - HashSet::from([( + let tally_pre = TallyParams { + total_stake: default_total_stake(), + wl_storage: &mut wl_storage, + event: &event, + votes: HashSet::from([( address::testing::established_address_1(), BlockHeight(10), - FractionalVotingPower::ONE_THIRD, + FractionalVotingPower::ONE_THIRD * default_total_stake(), )]), - )?; + } + .setup()?; let vote_info = NewVotes::new(Votes::default(), &HashMap::default())?; let (tally_post, changed_keys) = @@ -430,24 +474,26 @@ mod tests { /// not yet seen. 
#[test] fn test_calculate_one_vote_not_seen() -> Result<()> { - let mut wl_storage = TestWlStorage::default(); + let (mut wl_storage, _) = test_utils::setup_default_storage(); - let event = arbitrary_event(); + let event = default_event(); let keys = vote_tallies::Keys::from(&event); - let _tally_pre = setup_tally( - &mut wl_storage, - &event, - &keys, - HashSet::from([( + let _tally_pre = TallyParams { + total_stake: default_total_stake(), + wl_storage: &mut wl_storage, + event: &event, + votes: HashSet::from([( address::testing::established_address_1(), BlockHeight(10), - FractionalVotingPower::ONE_THIRD, + FractionalVotingPower::ONE_THIRD * default_total_stake(), )]), - )?; + } + .setup()?; let validator = address::testing::established_address_2(); let vote_height = BlockHeight(100); - let voting_power = FractionalVotingPower::ONE_THIRD; + let voting_power = + FractionalVotingPower::ONE_THIRD * default_total_stake(); let vote = (validator, vote_height); let votes = Votes::from([vote.clone()]); let voting_powers = HashMap::from([(vote.clone(), voting_power)]); @@ -460,7 +506,7 @@ mod tests { tally_post, Tally { voting_power: get_epoched_voting_power( - FractionalVotingPower::TWO_THIRDS, + FractionalVotingPower::TWO_THIRDS * default_total_stake(), ), seen_by: BTreeMap::from([ (address::testing::established_address_1(), 10.into()), @@ -480,27 +526,33 @@ mod tests { /// seen. 
#[test] fn test_calculate_one_vote_seen() -> Result<()> { - let mut wl_storage = TestWlStorage::default(); + let (mut wl_storage, _) = test_utils::setup_default_storage(); - let event = arbitrary_event(); + let first_vote_stake = + FractionalVotingPower::ONE_THIRD * default_total_stake(); + let second_vote_stake = + FractionalVotingPower::ONE_THIRD * default_total_stake(); + let total_stake = first_vote_stake + second_vote_stake; + + let event = default_event(); let keys = vote_tallies::Keys::from(&event); - let _tally_pre = setup_tally( - &mut wl_storage, - &event, - &keys, - HashSet::from([( + let _tally_pre = TallyParams { + total_stake, + wl_storage: &mut wl_storage, + event: &event, + votes: HashSet::from([( address::testing::established_address_1(), BlockHeight(10), - FractionalVotingPower::ONE_THIRD, + first_vote_stake, )]), - )?; + } + .setup()?; let validator = address::testing::established_address_2(); let vote_height = BlockHeight(100); - let voting_power = FractionalVotingPower::TWO_THIRDS; let vote = (validator, vote_height); let votes = Votes::from([vote.clone()]); - let voting_powers = HashMap::from([(vote.clone(), voting_power)]); + let voting_powers = HashMap::from([(vote.clone(), second_vote_stake)]); let vote_info = NewVotes::new(votes, &voting_powers)?; let (tally_post, changed_keys) = @@ -509,9 +561,7 @@ mod tests { assert_eq!( tally_post, Tally { - voting_power: get_epoched_voting_power( - FractionalVotingPower::WHOLE - ), + voting_power: get_epoched_voting_power(total_stake), seen_by: BTreeMap::from([ (address::testing::established_address_1(), 10.into()), vote, @@ -528,8 +578,10 @@ mod tests { #[test] fn test_keys_changed_all() -> Result<()> { - let voting_power_a = FractionalVotingPower::ONE_THIRD; - let voting_power_b = FractionalVotingPower::TWO_THIRDS; + let voting_power_a = + FractionalVotingPower::ONE_THIRD * default_total_stake(); + let voting_power_b = + FractionalVotingPower::TWO_THIRDS * default_total_stake(); let seen_a = false; 
let seen_b = true; @@ -543,7 +595,7 @@ mod tests { BlockHeight(20), )]); - let event = arbitrary_event(); + let event = default_event(); let keys = vote_tallies::Keys::from(&event); let pre = Tally { voting_power: get_epoched_voting_power(voting_power_a), @@ -572,11 +624,11 @@ mod tests { BlockHeight(10), )]); - let event = arbitrary_event(); + let event = default_event(); let keys = vote_tallies::Keys::from(&event); let pre = Tally { voting_power: get_epoched_voting_power( - FractionalVotingPower::ONE_THIRD, + FractionalVotingPower::ONE_THIRD * default_total_stake(), ), seen, seen_by, @@ -589,9 +641,7 @@ mod tests { Ok(()) } - fn get_epoched_voting_power( - fraction: FractionalVotingPower, - ) -> EpochedVotingPower { - EpochedVotingPower::from([(0.into(), fraction)]) + fn get_epoched_voting_power(thus_far: token::Amount) -> EpochedVotingPower { + EpochedVotingPower::from([(0.into(), thus_far)]) } } diff --git a/ethereum_bridge/src/storage/eth_bridge_queries.rs b/ethereum_bridge/src/storage/eth_bridge_queries.rs index 5d068d2e38c..5e2f7b63991 100644 --- a/ethereum_bridge/src/storage/eth_bridge_queries.rs +++ b/ethereum_bridge/src/storage/eth_bridge_queries.rs @@ -354,12 +354,16 @@ where } } - /// Query the consensus [`ValidatorSetArgs`] at the given [`Epoch`]. + /// Query a chosen [`ValidatorSetArgs`] at the given [`Epoch`]. /// Also returns a map of each validator's voting power. - pub fn get_validator_set_args( + fn get_validator_set_args( self, epoch: Option, - ) -> (ValidatorSetArgs, VotingPowersMap) { + mut select_validator: F, + ) -> (ValidatorSetArgs, VotingPowersMap) + where + F: FnMut(&EthAddrBook) -> EthAddress, + { let epoch = epoch .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); @@ -377,12 +381,12 @@ where let (validators, voting_powers) = voting_powers_map .get_sorted() .into_iter() - .map(|(&EthAddrBook { hot_key_addr, .. 
}, &power)| { + .map(|(addr_book, &power)| { let voting_power: EthBridgeVotingPower = FractionalVotingPower::new(power.into(), total_power) .expect("Fractional voting power should be >1") .into(); - (hot_key_addr, voting_power) + (select_validator(addr_book), voting_power) }) .unzip(); @@ -396,6 +400,32 @@ where ) } + /// Query the Bridge [`ValidatorSetArgs`] at the given [`Epoch`]. + /// Also returns a map of each validator's voting power. + #[inline] + pub fn get_bridge_validator_set( + self, + epoch: Option, + ) -> (ValidatorSetArgs, VotingPowersMap) { + self.get_validator_set_args( + epoch, + |&EthAddrBook { hot_key_addr, .. }| hot_key_addr, + ) + } + + /// Query the Governance [`ValidatorSetArgs`] at the given [`Epoch`]. + /// Also returns a map of each validator's voting power. + #[inline] + pub fn get_governance_validator_set( + self, + epoch: Option, + ) -> (ValidatorSetArgs, VotingPowersMap) { + self.get_validator_set_args( + epoch, + |&EthAddrBook { cold_key_addr, .. }| cold_key_addr, + ) + } + /// Check if the token at the given [`EthAddress`] is whitelisted. 
pub fn is_token_whitelisted(self, &token: &EthAddress) -> bool { let key = whitelist::Key { diff --git a/ethereum_bridge/src/storage/proof.rs b/ethereum_bridge/src/storage/proof.rs index c25033bb9f2..a0287258837 100644 --- a/ethereum_bridge/src/storage/proof.rs +++ b/ethereum_bridge/src/storage/proof.rs @@ -102,14 +102,13 @@ pub fn sort_sigs( impl Encode<1> for EthereumProof<(Epoch, VotingPowersMap)> { fn tokenize(&self) -> [eth_abi::Token; 1] { let signatures = sort_sigs(&self.data.1, &self.signatures); - let (hot_key_addrs, cold_key_addrs, voting_powers) = + let (bridge_validators, governance_validators) = self.data.1.get_abi_encoded(); let (KeccakHash(bridge_hash), KeccakHash(gov_hash)) = valset_upd_toks_to_hashes( self.data.0, - hot_key_addrs, - cold_key_addrs, - voting_powers, + bridge_validators, + governance_validators, ); [eth_abi::Token::Tuple(vec![ eth_abi::Token::FixedBytes(bridge_hash.to_vec()), diff --git a/ethereum_bridge/src/storage/vote_tallies.rs b/ethereum_bridge/src/storage/vote_tallies.rs index 084746adbbe..ec03c498d6a 100644 --- a/ethereum_bridge/src/storage/vote_tallies.rs +++ b/ethereum_bridge/src/storage/vote_tallies.rs @@ -8,8 +8,8 @@ use namada_core::ledger::eth_bridge::ADDRESS; use namada_core::types::address::Address; use namada_core::types::ethereum_events::{EthereumEvent, Uint}; use namada_core::types::hash::Hash; -use namada_core::types::keccak::KeccakHash; -use namada_core::types::storage::{DbKeySeg, Epoch, Key}; +use namada_core::types::keccak::{keccak_hash, KeccakHash}; +use namada_core::types::storage::{BlockHeight, DbKeySeg, Epoch, Key}; use namada_core::types::vote_extensions::validator_set_update::VotingPowersMap; use namada_macros::StorageKeys; @@ -189,7 +189,7 @@ impl From<&Hash> for Keys { /// A wrapper struct for managing keys related to /// tracking signatures over bridge pool roots and nonces. 
-#[derive(Clone)] +#[derive(Debug, Clone)] pub struct BridgePoolRoot(pub BridgePoolRootProof); impl BorshSerialize for BridgePoolRoot { @@ -207,15 +207,23 @@ impl BorshDeserialize for BridgePoolRoot { } } -impl<'a> From<&'a BridgePoolRoot> for Keys { - fn from(bp_root: &BridgePoolRoot) -> Self { - let hash = [bp_root.0.data.0.to_string(), bp_root.0.data.1.to_string()] - .concat(); +impl From<(&BridgePoolRoot, BlockHeight)> for Keys { + fn from( + (BridgePoolRoot(bp_root), root_height): (&BridgePoolRoot, BlockHeight), + ) -> Self { + let hash = { + let (KeccakHash(root), nonce) = &bp_root.data; + + let mut to_hash = [0u8; 64]; + to_hash[..32].copy_from_slice(root); + to_hash[32..].copy_from_slice(&nonce.to_bytes()); + + keccak_hash(to_hash).to_string() + }; let prefix = super::prefix() - .push(&BRIDGE_POOL_ROOT_PREFIX_KEY_SEGMENT.to_owned()) - .expect("should always be able to construct this key") - .push(&hash) - .expect("should always be able to construct this key"); + .with_segment(BRIDGE_POOL_ROOT_PREFIX_KEY_SEGMENT.to_owned()) + .with_segment(root_height) + .with_segment(hash); Keys { prefix, _phantom: std::marker::PhantomData, @@ -223,12 +231,6 @@ impl<'a> From<&'a BridgePoolRoot> for Keys { } } -impl From for Keys { - fn from(bp_root: BridgePoolRoot) -> Self { - Self::from(&bp_root) - } -} - /// Get the key prefix corresponding to the storage location of validator set /// updates whose "seen" state is being tracked. 
pub fn valset_upds_prefix() -> Key { @@ -263,9 +265,8 @@ mod test { EthereumEvent::TransfersToNamada { nonce: 0.into(), transfers: vec![], - valid_transfers_map: vec![], }, - "9E1736C43D19118E6CE4302118AF337109491ECC52757DFB949BAD6A7940B0C2" + "AB24A95F44CECA5D2AED4B6D056ADDDD8539F44C6CD6CA506534E830C82EA8A8" .to_owned(), ) } diff --git a/ethereum_bridge/src/test_utils.rs b/ethereum_bridge/src/test_utils.rs index aec64633964..9c24e9edfa7 100644 --- a/ethereum_bridge/src/test_utils.rs +++ b/ethereum_bridge/src/test_utils.rs @@ -17,7 +17,11 @@ use namada_core::types::key::{self, protocol_pk_key, RefTo}; use namada_core::types::storage::{BlockHeight, Key}; use namada_core::types::token; use namada_proof_of_stake::parameters::PosParams; +use namada_proof_of_stake::pos_queries::PosQueries; use namada_proof_of_stake::types::GenesisValidator; +use namada_proof_of_stake::{ + become_validator, bond_tokens, store_total_consensus_stake, BecomeValidator, +}; use crate::parameters::{ ContractVersion, Contracts, EthereumBridgeConfig, MinimumConfirmations, @@ -69,23 +73,29 @@ pub fn setup_default_storage() (wl_storage, all_keys) } -/// Set up a [`TestWlStorage`] initialized at genesis with a single -/// validator. -/// -/// The validator's address is [`address::testing::established_address_1`]. +/// Set up a [`TestWlStorage`] initialized at genesis with +/// [`default_validator`]. #[inline] pub fn init_default_storage( wl_storage: &mut TestWlStorage, ) -> HashMap { init_storage_with_validators( wl_storage, - HashMap::from_iter([( - address::testing::established_address_1(), - token::Amount::native_whole(100), - )]), + HashMap::from_iter([default_validator()]), ) } +/// Default validator used in tests. +/// +/// The validator's address is [`address::testing::established_address_1`], +/// and its voting power is proportional to the stake of 100 NAM. 
+#[inline] +pub fn default_validator() -> (Address, token::Amount) { + let addr = address::testing::established_address_1(); + let voting_power = token::Amount::native_whole(100); + (addr, voting_power) +} + /// Writes a dummy [`EthereumBridgeConfig`] to the given [`TestWlStorage`], and /// returns it. pub fn bootstrap_ethereum_bridge( @@ -107,10 +117,6 @@ pub fn bootstrap_ethereum_bridge( address: EthAddress([2; 20]), version: ContractVersion::default(), }, - governance: UpgradeableContract { - address: EthAddress([3; 20]), - version: ContractVersion::default(), - }, }, }; config.init_storage(wl_storage); @@ -213,23 +219,7 @@ pub fn init_storage_with_validators( 0.into(), ) .expect("Test failed"); - let config = EthereumBridgeConfig { - erc20_whitelist: vec![], - eth_start_height: Default::default(), - min_confirmations: Default::default(), - contracts: Contracts { - native_erc20: wnam(), - bridge: UpgradeableContract { - address: EthAddress([42; 20]), - version: Default::default(), - }, - governance: UpgradeableContract { - address: EthAddress([18; 20]), - version: Default::default(), - }, - }, - }; - config.init_storage(wl_storage); + bootstrap_ethereum_bridge(wl_storage); for (validator, keys) in all_keys.iter() { let protocol_key = keys.protocol.ref_to(); @@ -261,3 +251,56 @@ pub fn commit_bridge_pool_root_at_height( storage.commit_block(MockDBWriteBatch).unwrap(); storage.block.tree.delete(&get_key_from_hash(root)).unwrap(); } + +/// Append validators to storage at the current epoch +/// offset by pipeline length. 
+pub fn append_validators_to_storage( + wl_storage: &mut TestWlStorage, + consensus_validators: HashMap, +) -> HashMap { + let current_epoch = wl_storage.storage.get_current_epoch().0; + + let mut all_keys = HashMap::new(); + let params = wl_storage.pos_queries().get_pos_params(); + + for (validator, stake) in consensus_validators { + let keys = TestValidatorKeys::generate(); + + let consensus_key = &keys.consensus.ref_to(); + let eth_cold_key = &keys.eth_gov.ref_to(); + let eth_hot_key = &keys.eth_bridge.ref_to(); + + become_validator(BecomeValidator { + storage: wl_storage, + params: ¶ms, + address: &validator, + consensus_key, + eth_cold_key, + eth_hot_key, + current_epoch, + commission_rate: Dec::new(5, 2).unwrap(), + max_commission_rate_change: Dec::new(1, 2).unwrap(), + }) + .expect("Test failed"); + bond_tokens(wl_storage, None, &validator, stake, current_epoch) + .expect("Test failed"); + + all_keys.insert(validator, keys); + } + + store_total_consensus_stake( + wl_storage, + current_epoch + params.pipeline_len, + ) + .expect("Test failed"); + + for (validator, keys) in all_keys.iter() { + let protocol_key = keys.protocol.ref_to(); + wl_storage + .write(&protocol_pk_key(validator), protocol_key) + .expect("Test failed"); + } + wl_storage.commit_block().expect("Test failed"); + + all_keys +} diff --git a/genesis/dev.toml b/genesis/dev.toml index f704f5883f3..105bdbda192 100644 --- a/genesis/dev.toml +++ b/genesis/dev.toml @@ -1,8 +1,6 @@ # Developer network genesis_time = "2021-12-20T15:00:00.00Z" native_token = "NAM" -faucet_pow_difficulty = 0 -faucet_withdrawal_limit = "1000" # Some tokens present at genesis. 
@@ -19,8 +17,6 @@ Christel = "1000000" "Christel.public_key" = "100" Daewon = "1000000" Ester = "1000000" -faucet = "922337203685400000000" -"faucet.public_key" = "100" [token.NAM.parameters] max_reward_rate = "0.1" kd_gain_nom = "0.1" @@ -37,7 +33,6 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" [token.BTC.parameters] max_reward_rate = "0.1" kd_gain_nom = "0.1" @@ -54,7 +49,6 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" [token.ETH.parameters] max_reward_rate = "0.1" kd_gain_nom = "0.1" @@ -71,7 +65,6 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" [token.DOT.parameters] max_reward_rate = "0.1" kd_gain_nom = "0.1" @@ -88,7 +81,6 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" [token.Schnitzel.parameters] max_reward_rate = "0.1" kd_gain_nom = "0.1" @@ -105,7 +97,6 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" [token.Apfel.parameters] max_reward_rate = "0.1" kd_gain_nom = "0.1" @@ -123,17 +114,12 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" [token.Kartoffel.parameters] max_reward_rate = "0.1" kd_gain_nom = "0.1" kp_gain_nom = "0.1" locked_ratio_target_key = "0.6667" -# Some established accounts present at genesis. 
-[established.faucet] -vp = "vp_testnet_faucet" - [established.Albert] vp = "vp_user" @@ -168,10 +154,6 @@ filename = "vp_user.wasm" # filename (relative to wasm path used by the node) filename = "vp_validator.wasm" -# Faucet VP -[wasm.vp_testnet_faucet] -filename = "vp_testnet_faucet.wasm" - # MASP VP [wasm.vp_masp] filename = "vp_masp.wasm" diff --git a/genesis/e2e-tests-single-node.toml b/genesis/e2e-tests-single-node.toml index 0af2a23b86f..dbf8c1ea763 100644 --- a/genesis/e2e-tests-single-node.toml +++ b/genesis/e2e-tests-single-node.toml @@ -4,8 +4,6 @@ genesis_time = "2021-09-30T10:00:00Z" native_token = "NAM" -faucet_pow_difficulty = 0 -faucet_withdrawal_limit = "1000" [validator.validator-0] # Validator's staked NAM at genesis. @@ -18,7 +16,7 @@ validator_vp = "vp_validator" commission_rate = "0.05" # Maximum change per epoch in the commission rate max_commission_rate_change = "0.01" -# Public IP:port address. +# (Public IP | Hostname):port address. # We set the port to be the default+1000, so that if a local node was running at # the same time as the E2E tests, it wouldn't affect them. 
net_address = "127.0.0.1:27656" @@ -37,8 +35,6 @@ Christel = "1000000" "Christel.public_key" = "100" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" -"faucet.public_key" = "100" "validator-0.public_key" = "100" [token.BTC] @@ -50,7 +46,6 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" [token.ETH] address = "atest1v4ehgw36xqmr2d3nx3ryvd2xxgmrq33j8qcns33sxezrgv6zxdzrydjrxveygd2yxumrsdpsf9jc2p" @@ -61,7 +56,6 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" [token.DOT] address = "atest1v4ehgw36gg6nvs2zgfpyxsfjgc65yv6pxy6nwwfsxgungdzrggeyzv35gveyxsjyxymyz335hur2jn" @@ -72,7 +66,6 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" [token.Schnitzel] address = "atest1v4ehgw36xue5xvf5xvuyzvpjx5un2v3k8qeyvd3cxdqns32p89rrxd6xx9zngvpegccnzs699rdnnt" @@ -83,7 +76,6 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" [token.Apfel] address = "atest1v4ehgw36gfryydj9g3p5zv3kg9znyd358ycnzsfcggc5gvecgc6ygs2rxv6ry3zpg4zrwdfeumqcz9" @@ -94,7 +86,6 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" [token.Kartoffel] address = "atest1v4ehgw36gep5ysecxq6nyv3jg3zygv3e89qn2vp48pryxsf4xpznvve5gvmy23fs89pryvf5a6ht90" @@ -106,11 +97,6 @@ Bertha = "1000000" Christel = "1000000" Daewon = "1000000" Ester = "1000000" -faucet = "9223372036854" - -# Some established accounts present at genesis. 
-[established.faucet] -vp = "vp_testnet_faucet" [established.Albert] vp = "vp_user" @@ -144,10 +130,6 @@ filename = "vp_user.wasm" # filename (relative to wasm path used by the node) filename = "vp_validator.wasm" -# Faucet VP -[wasm.vp_testnet_faucet] -filename = "vp_testnet_faucet.wasm" - # MASP VP [wasm.vp_masp] filename = "vp_masp.wasm" diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index c18a6262682..0fbbf2231b4 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -4000,7 +4000,9 @@ where } } -fn get_total_consensus_stake( +/// Find the total amount of tokens staked at the given `epoch`, +/// belonging to the set of consensus validators. +pub fn get_total_consensus_stake( storage: &S, epoch: Epoch, params: &PosParams, diff --git a/proof_of_stake/src/pos_queries.rs b/proof_of_stake/src/pos_queries.rs index 351cf4902c1..ce2e25cf39b 100644 --- a/proof_of_stake/src/pos_queries.rs +++ b/proof_of_stake/src/pos_queries.rs @@ -18,7 +18,7 @@ use thiserror::Error; use crate::types::WeightedValidator; use crate::{ consensus_validator_set_handle, find_validator_by_raw_hash, - read_pos_params, validator_eth_cold_key_handle, + get_total_consensus_stake, read_pos_params, validator_eth_cold_key_handle, validator_eth_hot_key_handle, ConsensusValidatorSet, PosParams, }; @@ -131,10 +131,14 @@ where /// Lookup the total voting power for an epoch (defaulting to the /// epoch of the current yet-to-be-committed block). pub fn get_total_voting_power(self, epoch: Option) -> token::Amount { - self.get_consensus_validators(epoch) - .iter() - .map(|validator| validator.bonded_stake) - .sum::() + let epoch = epoch + .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); + let pos_params = self.get_pos_params(); + get_total_consensus_stake(self.wl_storage, epoch, &pos_params) + // NB: the only reason this call should fail is if we request + // an epoch that hasn't been reached yet. 
let's "fail" by + // returning a total stake of 0 NAM + .unwrap_or_default() } /// Return evidence parameters. diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 783a3498837..47023ab9da0 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -101,7 +101,6 @@ data-encoding.workspace = true derivation-path.workspace = true derivative.workspace = true ethbridge-bridge-contract.workspace = true -ethbridge-governance-contract.workspace = true ethers.workspace = true eyre.workspace = true futures.workspace = true diff --git a/shared/src/ledger/eth_bridge.rs b/shared/src/ledger/eth_bridge.rs index 50d560c2b01..a73f5efd770 100644 --- a/shared/src/ledger/eth_bridge.rs +++ b/shared/src/ledger/eth_bridge.rs @@ -17,6 +17,8 @@ use crate::types::control_flow::time::{ Constant, Duration, Error as TimeoutError, Instant, LinearBackoff, Sleep, }; use crate::types::control_flow::{self, Halt, TryHalt}; +use crate::types::io::Io; +use crate::{display_line, edisplay_line}; const DEFAULT_BACKOFF: Duration = std::time::Duration::from_millis(500); const DEFAULT_CEILING: Duration = std::time::Duration::from_secs(30); @@ -98,7 +100,10 @@ pub struct BlockOnEthSync { } /// Block until Ethereum finishes synchronizing. 
-pub async fn block_on_eth_sync(client: &C, args: BlockOnEthSync) -> Halt<()> +pub async fn block_on_eth_sync( + client: &C, + args: BlockOnEthSync, +) -> Halt<()> where C: Middleware, { @@ -106,7 +111,7 @@ where deadline, delta_sleep, } = args; - tracing::info!("Attempting to synchronize with the Ethereum network"); + display_line!(IO, "Attempting to synchronize with the Ethereum network"); Sleep { strategy: LinearBackoff { delta: delta_sleep }, } @@ -122,15 +127,18 @@ where }) .await .try_halt(|_| { - tracing::error!("Timed out while waiting for Ethereum to synchronize"); + edisplay_line!( + IO, + "Timed out while waiting for Ethereum to synchronize" + ); })?; - tracing::info!("The Ethereum node is up to date"); + display_line!(IO, "The Ethereum node is up to date"); control_flow::proceed(()) } /// Check if Ethereum has finished synchronizing. In case it has /// not, perform `action`. -pub async fn eth_sync_or( +pub async fn eth_sync_or( client: &C, mut action: F, ) -> Halt> @@ -142,7 +150,8 @@ where .await .map(|status| status.is_synchronized()) .try_halt(|err| { - tracing::error!( + edisplay_line!( + IO, "An error occurred while fetching the Ethereum \ synchronization status: {err}" ); @@ -156,11 +165,11 @@ where /// Check if Ethereum has finished synchronizing. In case it has /// not, end execution. -pub async fn eth_sync_or_exit(client: &C) -> Halt<()> +pub async fn eth_sync_or_exit(client: &C) -> Halt<()> where C: Middleware, { - eth_sync_or(client, || { + eth_sync_or::<_, _, _, IO>(client, || { tracing::error!("The Ethereum node has not finished synchronizing"); }) .await? 
diff --git a/shared/src/ledger/eth_bridge/bridge_pool.rs b/shared/src/ledger/eth_bridge/bridge_pool.rs index 2b2220a5f9c..b9573cab979 100644 --- a/shared/src/ledger/eth_bridge/bridge_pool.rs +++ b/shared/src/ledger/eth_bridge/bridge_pool.rs @@ -3,7 +3,6 @@ use std::borrow::Cow; use std::cmp::Ordering; use std::collections::HashMap; -use std::io::Write; use std::sync::Arc; use borsh::BorshSerialize; @@ -17,35 +16,38 @@ use serde::{Deserialize, Serialize}; use super::{block_on_eth_sync, eth_sync_or_exit, BlockOnEthSync}; use crate::eth_bridge::ethers::abi::AbiDecode; -use crate::eth_bridge::structs::RelayProof; -use crate::ledger::args; -use crate::ledger::masp::{ShieldedContext, ShieldedUtils}; use crate::ledger::queries::{ - Client, GenBridgePoolProofReq, GenBridgePoolProofRsp, RPC, + Client, GenBridgePoolProofReq, GenBridgePoolProofRsp, TransferToErcArgs, + RPC, }; -use crate::ledger::rpc::{query_wasm_code_hash, validate_amount}; -use crate::ledger::tx::prepare_tx; -use crate::ledger::wallet::{Wallet, WalletUtils}; use crate::proto::Tx; +use crate::sdk::args; +use crate::sdk::error::Error; +use crate::sdk::masp::{ShieldedContext, ShieldedUtils}; +use crate::sdk::rpc::{query_wasm_code_hash, validate_amount}; +use crate::sdk::tx::prepare_tx; +use crate::sdk::wallet::{Wallet, WalletUtils}; use crate::types::address::Address; use crate::types::control_flow::time::{Duration, Instant}; use crate::types::control_flow::{ self, install_shutdown_signal, Halt, TryHalt, }; -use crate::types::error::Error; use crate::types::eth_abi::Encode; use crate::types::eth_bridge_pool::{ GasFee, PendingTransfer, TransferToEthereum, TransferToEthereumKind, }; +use crate::types::io::Io; use crate::types::keccak::KeccakHash; use crate::types::token::{Amount, DenominatedAmount}; use crate::types::voting_power::FractionalVotingPower; +use crate::{display, display_line}; /// Craft a transaction that adds a transfer to the Ethereum bridge pool. 
pub async fn build_bridge_pool_tx< C: crate::ledger::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -65,7 +67,7 @@ pub async fn build_bridge_pool_tx< wrapper_fee_payer: common::PublicKey, ) -> Result<(Tx, Option), Error> { let fee_payer = fee_payer.unwrap_or_else(|| sender.clone()); - let DenominatedAmount { amount, .. } = validate_amount( + let DenominatedAmount { amount, .. } = validate_amount::<_, IO>( client, amount, &wrapped_erc20s::token(&asset), @@ -75,7 +77,7 @@ pub async fn build_bridge_pool_tx< .map_err(|e| Error::Other(format!("Failed to validate amount. {}", e)))?; let DenominatedAmount { amount: fee_amount, .. - } = validate_amount(client, fee_amount, &fee_token, tx_args.force) + } = validate_amount::<_, IO>(client, fee_amount, &fee_token, tx_args.force) .await .map_err(|e| { Error::Other(format!( @@ -103,7 +105,7 @@ pub async fn build_bridge_pool_tx< }; let tx_code_hash = - query_wasm_code_hash(client, code_path.to_str().unwrap()) + query_wasm_code_hash::<_, IO>(client, code_path.to_str().unwrap()) .await .unwrap(); @@ -113,7 +115,7 @@ pub async fn build_bridge_pool_tx< // TODO(namada#1800): validate the tx on the client side - let epoch = prepare_tx::( + let epoch = prepare_tx::( client, wallet, shielded, @@ -121,8 +123,6 @@ pub async fn build_bridge_pool_tx< &mut tx, wrapper_fee_payer, None, - #[cfg(not(feature = "mainnet"))] - false, ) .await?; @@ -138,7 +138,7 @@ struct BridgePoolResponse { /// Query the contents of the Ethereum bridge pool. /// Prints out a json payload. 
-pub async fn query_bridge_pool(client: &C) +pub async fn query_bridge_pool(client: &C) where C: Client + Sync, { @@ -153,19 +153,19 @@ where .map(|transfer| (transfer.keccak256().to_string(), transfer)) .collect(); if pool_contents.is_empty() { - println!("Bridge pool is empty."); + display_line!(IO, "Bridge pool is empty."); return; } let contents = BridgePoolResponse { bridge_pool_contents: pool_contents, }; - println!("{}", serde_json::to_string_pretty(&contents).unwrap()); + display_line!(IO, "{}", serde_json::to_string_pretty(&contents).unwrap()); } /// Query the contents of the Ethereum bridge pool that /// is covered by the latest signed root. /// Prints out a json payload. -pub async fn query_signed_bridge_pool( +pub async fn query_signed_bridge_pool( client: &C, ) -> Halt> where @@ -182,13 +182,13 @@ where .map(|transfer| (transfer.keccak256().to_string(), transfer)) .collect(); if pool_contents.is_empty() { - println!("Bridge pool is empty."); + display_line!(IO, "Bridge pool is empty."); return control_flow::halt(); } let contents = BridgePoolResponse { bridge_pool_contents: pool_contents.clone(), }; - println!("{}", serde_json::to_string_pretty(&contents).unwrap()); + display_line!(IO, "{}", serde_json::to_string_pretty(&contents).unwrap()); control_flow::proceed(pool_contents) } @@ -197,7 +197,7 @@ where /// backing each `TransferToEthereum` event. /// /// Prints a json payload. -pub async fn query_relay_progress(client: &C) +pub async fn query_relay_progress(client: &C) where C: Client + Sync, { @@ -207,12 +207,12 @@ where .transfer_to_ethereum_progress(client) .await .unwrap(); - println!("{}", serde_json::to_string_pretty(&resp).unwrap()); + display_line!(IO, "{}", serde_json::to_string_pretty(&resp).unwrap()); } /// Internal methdod to construct a proof that a set of transfers are in the /// bridge pool. 
-async fn construct_bridge_pool_proof( +async fn construct_bridge_pool_proof( client: &C, args: GenBridgePoolProofReq<'_, '_>, ) -> Halt @@ -242,27 +242,29 @@ where let warning = "Warning".on_yellow(); let warning = warning.bold(); let warning = warning.blink(); - println!( + display_line!( + IO, "{warning}: The following hashes correspond to transfers that \ have surpassed the security threshold in Namada, therefore have \ likely been relayed to Ethereum, but do not yet have a quorum of \ validator signatures behind them in Namada; thus they are still \ in the Bridge pool:\n{warnings:?}", ); - print!("\nDo you wish to proceed? (y/n): "); - std::io::stdout().flush().unwrap(); + display!(IO, "\nDo you wish to proceed? (y/n): "); + IO::flush(); loop { - let mut buffer = String::new(); - let stdin = std::io::stdin(); - stdin.read_line(&mut buffer).try_halt(|e| { - println!("Encountered error reading from STDIN: {e:?}"); + let resp = IO::read().await.try_halt(|e| { + display_line!( + IO, + "Encountered error reading from STDIN: {e:?}" + ); })?; - match buffer.trim() { + match resp.trim() { "y" => break, "n" => return control_flow::halt(), _ => { - print!("Expected 'y' or 'n'. Please try again: "); - std::io::stdout().flush().unwrap(); + display!(IO, "Expected 'y' or 'n'. Please try again: "); + IO::flush(); } } } @@ -276,7 +278,7 @@ where .await; response.map(|response| response.data).try_halt(|e| { - println!("Encountered error constructing proof:\n{e}"); + display_line!(IO, "Encountered error constructing proof:\n{:?}", e); }) } @@ -286,13 +288,13 @@ struct BridgePoolProofResponse { hashes: Vec, relayer_address: Address, total_fees: HashMap, - abi_encoded_proof: Vec, + abi_encoded_args: Vec, } /// Construct a merkle proof of a batch of transfers in /// the bridge pool and return it to the user (as opposed /// to relaying it to ethereum). 
-pub async fn construct_proof( +pub async fn construct_proof( client: &C, args: args::BridgePoolProof, ) -> Halt<()> @@ -300,9 +302,9 @@ where C: Client + Sync, { let GenBridgePoolProofRsp { - abi_encoded_proof: bp_proof_bytes, + abi_encoded_args, appendices, - } = construct_bridge_pool_proof( + } = construct_bridge_pool_proof::<_, IO>( client, GenBridgePoolProofReq { transfers: args.transfers.as_slice().into(), @@ -330,14 +332,14 @@ where ) }) .unwrap_or_default(), - abi_encoded_proof: bp_proof_bytes, + abi_encoded_args, }; - println!("{}", serde_json::to_string(&resp).unwrap()); + display_line!(IO, "{}", serde_json::to_string(&resp).unwrap()); control_flow::proceed(()) } /// Relay a validator set update, signed off for a given epoch. -pub async fn relay_bridge_pool_proof( +pub async fn relay_bridge_pool_proof( eth_client: Arc, nam_client: &C, args: args::RelayBridgePoolProof, @@ -350,7 +352,7 @@ where let _signal_receiver = args.safe_mode.then(install_shutdown_signal); if args.sync { - block_on_eth_sync( + block_on_eth_sync::<_, IO>( &*eth_client, BlockOnEthSync { deadline: Instant::now() + Duration::from_secs(60), @@ -359,13 +361,12 @@ where ) .await?; } else { - eth_sync_or_exit(&*eth_client).await?; + eth_sync_or_exit::<_, IO>(&*eth_client).await?; } let GenBridgePoolProofRsp { - abi_encoded_proof: bp_proof, - .. - } = construct_bridge_pool_proof( + abi_encoded_args, .. 
+ } = construct_bridge_pool_proof::<_, IO>( nam_client, GenBridgePoolProofReq { transfers: Cow::Owned(args.transfers), @@ -385,7 +386,8 @@ where let error = "Error".on_red(); let error = error.bold(); let error = error.blink(); - println!( + display_line!( + IO, "{error}: Failed to retrieve the Ethereum Bridge smart \ contract address from storage with \ reason:\n{err_msg}\n\nPerhaps the Ethereum bridge is not \ @@ -395,9 +397,13 @@ where } }; - let bp_proof: RelayProof = - AbiDecode::decode(&bp_proof).try_halt(|error| { - println!("Unable to decode the generated proof: {:?}", error); + let (validator_set, signatures, bp_proof): TransferToErcArgs = + AbiDecode::decode(&abi_encoded_args).try_halt(|error| { + display_line!( + IO, + "Unable to decode the generated proof: {:?}", + error + ); })?; // NOTE: this operation costs no gas on Ethereum @@ -410,7 +416,8 @@ where let error = "Error".on_red(); let error = error.bold(); let error = error.blink(); - println!( + display_line!( + IO, "{error}: The Bridge pool nonce in the smart contract is \ {contract_nonce}, while the nonce in Namada is still {}. A \ relay of the former one has already happened, but a proof \ @@ -423,7 +430,8 @@ where let error = "Error".on_red(); let error = error.bold(); let error = error.blink(); - println!( + display_line!( + IO, "{error}: The Bridge pool nonce in the smart contract is \ {contract_nonce}, while the nonce in Namada is still {}. 
\ Somehow, Namada's nonce is ahead of the contract's nonce!", @@ -433,7 +441,8 @@ where } } - let mut relay_op = bridge.transfer_to_erc(bp_proof); + let mut relay_op = + bridge.transfer_to_erc(validator_set, signatures, bp_proof); if let Some(gas) = args.gas { relay_op.tx.set_gas(gas); } @@ -450,7 +459,7 @@ where .await .unwrap(); - println!("{transf_result:?}"); + display_line!(IO, "{transf_result:?}"); control_flow::proceed(()) } @@ -461,11 +470,13 @@ mod recommendations { use namada_core::types::uint::{self, Uint, I256}; use super::*; + use crate::edisplay_line; use crate::eth_bridge::storage::bridge_pool::{ get_nonce_key, get_signed_root_key, }; use crate::eth_bridge::storage::proof::BridgePoolRootProof; use crate::types::ethereum_events::Uint as EthUint; + use crate::types::io::Io; use crate::types::storage::BlockHeight; use crate::types::vote_extensions::validator_set_update::{ EthAddrBook, VotingPowersMap, VotingPowersMapExt, @@ -547,7 +558,7 @@ mod recommendations { /// Recommend the most economical batch of transfers to relay based /// on a conversion rate estimates from NAM to ETH and gas usage /// heuristics. - pub async fn recommend_batch( + pub async fn recommend_batch( client: &C, args: args::RecommendBatch, ) -> Halt<()> @@ -580,12 +591,15 @@ mod recommendations { ) .await .try_halt(|err| { - eprintln!("Failed to query Bridge pool proof: {err}"); + edisplay_line!( + IO, + "Failed to query Bridge pool proof: {err}" + ); })? .data, ) .try_halt(|err| { - eprintln!("Failed to decode Bridge pool proof: {err}"); + edisplay_line!(IO, "Failed to decode Bridge pool proof: {err}"); })?; // get the latest bridge pool nonce @@ -594,16 +608,20 @@ mod recommendations { .storage_value(client, None, None, false, &get_nonce_key()) .await .try_halt(|err| { - eprintln!("Failed to query Bridge pool nonce: {err}"); + edisplay_line!( + IO, + "Failed to query Bridge pool nonce: {err}" + ); })? 
.data, ) .try_halt(|err| { - eprintln!("Failed to decode Bridge pool nonce: {err}"); + edisplay_line!(IO, "Failed to decode Bridge pool nonce: {err}"); })?; if latest_bp_nonce != bp_root.data.1 { - eprintln!( + edisplay_line!( + IO, "The signed Bridge pool nonce is not up to date, repeat this \ query at a later time" ); @@ -627,17 +645,17 @@ mod recommendations { + valset_fee() * valset_size; // we don't recommend transfers that have already been relayed - let eligible = generate_eligible( + let eligible = generate_eligible::( &args.conversion_table, &in_progress, - query_signed_bridge_pool(client).await?, + query_signed_bridge_pool::<_, IO>(client).await?, )?; let max_gas = args.max_gas.map(Uint::from_u64).unwrap_or(uint::MAX_VALUE); let max_cost = args.gas.map(I256::from).unwrap_or_default(); - generate_recommendations( + generate_recommendations::( eligible, &args.conversion_table, validator_gas, @@ -651,17 +669,22 @@ mod recommendations { net_profit, bridge_pool_gas_fees, }| { - println!("Recommended batch: {transfer_hashes:#?}"); - println!( + display_line!(IO, "Recommended batch: {transfer_hashes:#?}"); + display_line!( + IO, "Estimated Ethereum transaction gas (in gwei): \ {ethereum_gas_fees}", ); - println!("Estimated net profit (in gwei): {net_profit}"); - println!("Total fees: {bridge_pool_gas_fees:#?}"); + display_line!( + IO, + "Estimated net profit (in gwei): {net_profit}" + ); + display_line!(IO, "Total fees: {bridge_pool_gas_fees:#?}"); }, ) .unwrap_or_else(|| { - println!( + display_line!( + IO, "Unable to find a recommendation satisfying the input \ parameters." ); @@ -705,7 +728,7 @@ mod recommendations { } /// Generate eligible recommendations. 
- fn generate_eligible( + fn generate_eligible( conversion_table: &HashMap, in_progress: &BTreeSet, signed_pool: HashMap, @@ -721,21 +744,24 @@ mod recommendations { .get(&pending.gas_fee.token) .and_then(|entry| match entry.conversion_rate { r if r == 0.0f64 => { - eprintln!( + edisplay_line!( + IO, "{}: Ignoring null conversion rate", pending.gas_fee.token, ); None } r if r < 0.0f64 => { - eprintln!( + edisplay_line!( + IO, "{}: Ignoring negative conversion rate: {r:.1}", pending.gas_fee.token, ); None } r if r > 1e9 => { - eprintln!( + edisplay_line!( + IO, "{}: Ignoring high conversion rate: {r:.1} > \ 10^9", pending.gas_fee.token, @@ -785,7 +811,7 @@ mod recommendations { /// Generates the actual recommendation from restrictions given by the /// input parameters. - fn generate_recommendations( + fn generate_recommendations( contents: Vec, conversion_table: &HashMap, validator_gas: Uint, @@ -853,6 +879,11 @@ mod recommendations { bridge_pool_gas_fees: total_fees, }) } else { + display_line!( + IO, + "Unable to find a recommendation satisfying the input \ + parameters." 
+ ); None }, ) @@ -882,6 +913,7 @@ mod recommendations { use super::*; use crate::types::control_flow::ProceedOrElse; + use crate::types::io::DefaultIo; /// An established user address for testing & development pub fn bertha_address() -> Address { @@ -987,8 +1019,12 @@ mod recommendations { signed_pool: &mut signed_pool, expected_eligible: &mut expected, }); - let eligible = - generate_eligible(&table, &in_progress, signed_pool).proceed(); + let eligible = generate_eligible::( + &table, + &in_progress, + signed_pool, + ) + .proceed(); assert_eq!(eligible, expected); eligible } @@ -1078,7 +1114,7 @@ mod recommendations { let profitable = vec![transfer(100_000); 17]; let hash = profitable[0].keccak256().to_string(); let expected = vec![hash; 17]; - let recommendation = generate_recommendations( + let recommendation = generate_recommendations::( process_transfers(profitable), &Default::default(), Uint::from_u64(800_000), @@ -1097,7 +1133,7 @@ mod recommendations { let hash = transfers[0].keccak256().to_string(); transfers.push(transfer(0)); let expected: Vec<_> = vec![hash; 17]; - let recommendation = generate_recommendations( + let recommendation = generate_recommendations::( process_transfers(transfers), &Default::default(), Uint::from_u64(800_000), @@ -1115,7 +1151,7 @@ mod recommendations { let transfers = vec![transfer(75_000); 4]; let hash = transfers[0].keccak256().to_string(); let expected = vec![hash; 2]; - let recommendation = generate_recommendations( + let recommendation = generate_recommendations::( process_transfers(transfers), &Default::default(), Uint::from_u64(50_000), @@ -1137,7 +1173,7 @@ mod recommendations { .map(|t| t.keccak256().to_string()) .take(5) .collect(); - let recommendation = generate_recommendations( + let recommendation = generate_recommendations::( process_transfers(transfers), &Default::default(), Uint::from_u64(150_000), @@ -1156,7 +1192,7 @@ mod recommendations { let hash = transfers[0].keccak256().to_string(); let expected = 
vec![hash; 4]; transfers.extend([transfer(17_500), transfer(17_500)]); - let recommendation = generate_recommendations( + let recommendation = generate_recommendations::( process_transfers(transfers), &Default::default(), Uint::from_u64(150_000), @@ -1172,7 +1208,7 @@ mod recommendations { #[test] fn test_wholly_infeasible() { let transfers = vec![transfer(75_000); 4]; - let recommendation = generate_recommendations( + let recommendation = generate_recommendations::( process_transfers(transfers), &Default::default(), Uint::from_u64(300_000), @@ -1253,7 +1289,7 @@ mod recommendations { const VALIDATOR_GAS_FEE: Uint = Uint::from_u64(100_000); - let recommended_batch = generate_recommendations( + let recommended_batch = generate_recommendations::( eligible, &conversion_table, // gas spent by validator signature checks diff --git a/shared/src/ledger/eth_bridge/validator_set.rs b/shared/src/ledger/eth_bridge/validator_set.rs index 7ff30ef07c6..4ae08dd5983 100644 --- a/shared/src/ledger/eth_bridge/validator_set.rs +++ b/shared/src/ledger/eth_bridge/validator_set.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use std::task::Poll; use data_encoding::HEXLOWER; -use ethbridge_governance_contract::Governance; +use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; use futures::future::{self, FutureExt}; use namada_core::hints; @@ -17,13 +17,18 @@ use namada_core::types::storage::Epoch; use super::{block_on_eth_sync, eth_sync_or, eth_sync_or_exit, BlockOnEthSync}; use crate::eth_bridge::ethers::abi::{AbiDecode, AbiType, Tokenizable}; use crate::eth_bridge::ethers::core::types::TransactionReceipt; -use crate::eth_bridge::structs::{Signature, ValidatorSetArgs}; -use crate::ledger::args; -use crate::ledger::queries::{Client, RPC}; +use crate::eth_bridge::structs::Signature; +use crate::ledger::queries::RPC; +use crate::sdk::args; +use crate::sdk::queries::Client; use crate::types::control_flow::time::{self, Duration, Instant}; use crate::types::control_flow::{ self, 
install_shutdown_signal, Halt, TryHalt, }; +use crate::types::ethereum_events::EthAddress; +use crate::types::io::{DefaultIo, Io}; +use crate::types::vote_extensions::validator_set_update::ValidatorSetArgs; +use crate::{display_line, edisplay_line}; /// Relayer related errors. #[derive(Debug, Default)] @@ -139,7 +144,7 @@ impl GetStatus for RelayResult { fn is_successful(&self) -> bool { use RelayResult::*; match self { - GovernanceCallError(_) | NonceError { .. } | NoReceipt => false, + BridgeCallError(_) | NonceError { .. } | NoReceipt => false, Receipt { receipt } => receipt.is_successful(), } } @@ -151,7 +156,7 @@ enum CheckNonce {} /// Do not check the nonce of a relay. enum DoNotCheckNonce {} -/// Determine if the nonce in the Governance smart contract prompts +/// Determine if the nonce in the Bridge smart contract prompts /// a relay operation or not. trait ShouldRelay { /// The result of a relay operation. @@ -161,7 +166,7 @@ trait ShouldRelay { type Future<'gov>: Future> + 'gov; /// Returns [`Ok`] if the relay should happen. 
- fn should_relay(_: Epoch, _: &Governance) -> Self::Future<'_> + fn should_relay(_: Epoch, _: &Bridge) -> Self::Future<'_> where E: Middleware, E::Error: std::fmt::Display; @@ -175,7 +180,7 @@ impl ShouldRelay for DoNotCheckNonce { type RelayResult = Option; #[inline] - fn should_relay(_: Epoch, _: &Governance) -> Self::Future<'_> + fn should_relay(_: Epoch, _: &Bridge) -> Self::Future<'_> where E: Middleware, E::Error: std::fmt::Display, @@ -194,26 +199,23 @@ impl ShouldRelay for CheckNonce { Pin> + 'gov>>; type RelayResult = RelayResult; - fn should_relay( - epoch: Epoch, - governance: &Governance, - ) -> Self::Future<'_> + fn should_relay(epoch: Epoch, bridge: &Bridge) -> Self::Future<'_> where E: Middleware, E::Error: std::fmt::Display, { Box::pin(async move { - let governance_epoch_prep_call = governance.validator_set_nonce(); - let governance_epoch_fut = - governance_epoch_prep_call.call().map(|result| { + let bridge_epoch_prep_call = bridge.validator_set_nonce(); + let bridge_epoch_fut = + bridge_epoch_prep_call.call().map(|result| { result .map_err(|err| { - RelayResult::GovernanceCallError(err.to_string()) + RelayResult::BridgeCallError(err.to_string()) }) .map(|e| Epoch(e.as_u64())) }); - let gov_current_epoch = governance_epoch_fut.await?; + let gov_current_epoch = bridge_epoch_fut.await?; if epoch == gov_current_epoch + 1u64 { Ok(()) } else { @@ -233,15 +235,15 @@ impl ShouldRelay for CheckNonce { /// Relay result for [`CheckNonce`]. enum RelayResult { - /// The call to Governance failed. - GovernanceCallError(String), + /// The call to Bridge failed. + BridgeCallError(String), /// Some nonce related error occurred. /// /// The following comparison must hold: `contract + 1 = argument`. NonceError { /// The value of the [`Epoch`] argument passed via CLI. argument: Epoch, - /// The value of the [`Epoch`] in the Governance contract. + /// The value of the [`Epoch`] in the bridge contract. 
contract: Epoch, }, /// No receipt was returned from the relay operation. @@ -266,7 +268,7 @@ impl From> for RelayResult { /// Query an ABI encoding of the validator set to be installed /// at the given epoch, and its associated proof. -pub async fn query_validator_set_update_proof( +pub async fn query_validator_set_update_proof( client: &C, args: args::ValidatorSetProof, ) where @@ -285,14 +287,15 @@ pub async fn query_validator_set_update_proof( .await .unwrap(); - println!("0x{}", HEXLOWER.encode(encoded_proof.as_ref())); + display_line!(IO, "0x{}", HEXLOWER.encode(encoded_proof.as_ref())); } -/// Query an ABI encoding of the validator set at a given epoch. -pub async fn query_validator_set_args( +/// Query an ABI encoding of the Bridge validator set at a given epoch. +pub async fn query_bridge_validator_set( client: &C, - args: args::ConsensusValidatorSet, -) where + args: args::BridgeValidatorSet, +) -> Halt<()> +where C: Client + Sync, { let epoch = if let Some(epoch) = args.epoch { @@ -301,18 +304,83 @@ pub async fn query_validator_set_args( RPC.shell().epoch(client).await.unwrap() }; - let encoded_validator_set_args = RPC + let args = RPC .shell() .eth_bridge() - .read_consensus_valset(client, &epoch) + .read_bridge_valset(client, &epoch) .await - .unwrap(); + .try_halt(|err| { + tracing::error!(%err, "Failed to fetch Bridge validator set"); + })?; + + display_validator_set::(args); + control_flow::proceed(()) +} + +/// Query an ABI encoding of the Governance validator set at a given epoch. 
+pub async fn query_governnace_validator_set( + client: &C, + args: args::GovernanceValidatorSet, +) -> Halt<()> +where + C: Client + Sync, +{ + let epoch = if let Some(epoch) = args.epoch { + epoch + } else { + RPC.shell().epoch(client).await.unwrap() + }; + + let args = RPC + .shell() + .eth_bridge() + .read_governance_valset(client, &epoch) + .await + .try_halt(|err| { + tracing::error!(%err, "Failed to fetch Governance validator set"); + })?; + + display_validator_set::(args); + control_flow::proceed(()) +} - println!("0x{}", HEXLOWER.encode(encoded_validator_set_args.as_ref())); +/// Display the given [`ValidatorSetArgs`]. +fn display_validator_set(args: ValidatorSetArgs) { + use serde::Serialize; + + #[derive(Serialize)] + struct Validator { + addr: EthAddress, + voting_power: u128, + } + + #[derive(Serialize)] + struct ValidatorSet { + set: Vec, + } + + let ValidatorSetArgs { + validators, + voting_powers, + .. + } = args; + let validator_set = ValidatorSet { + set: validators + .into_iter() + .zip(voting_powers.into_iter().map(u128::from)) + .map(|(addr, voting_power)| Validator { addr, voting_power }) + .collect(), + }; + + display_line!( + IO, + "{}", + serde_json::to_string_pretty(&validator_set).unwrap() + ); } /// Relay a validator set update, signed off for a given epoch. 
-pub async fn relay_validator_set_update( +pub async fn relay_validator_set_update( eth_client: Arc, nam_client: &C, args: args::ValidatorSetUpdateRelay, @@ -325,7 +393,7 @@ where let mut signal_receiver = args.safe_mode.then(install_shutdown_signal); if args.sync { - block_on_eth_sync( + block_on_eth_sync::<_, IO>( &*eth_client, BlockOnEthSync { deadline: Instant::now() + Duration::from_secs(60), @@ -334,7 +402,7 @@ where ) .await?; } else { - eth_sync_or_exit(&*eth_client).await?; + eth_sync_or_exit::<_, IO>(&*eth_client).await?; } if args.daemon { @@ -351,8 +419,11 @@ where eth_client, nam_client, |relay_result| match relay_result { - RelayResult::GovernanceCallError(reason) => { - tracing::error!(reason, "Calling Governance failed"); + RelayResult::BridgeCallError(reason) => { + edisplay_line!( + IO, + "Calling Bridge failed due to: {reason}" + ); } RelayResult::NonceError { argument, contract } => { let whence = match argument.cmp(&contract) { @@ -360,22 +431,31 @@ where Ordering::Equal => "identical to", Ordering::Greater => "too far ahead of", }; - tracing::error!( - ?argument, - ?contract, - "Argument nonce is {whence} contract nonce" + edisplay_line!( + IO, + "Argument nonce <{argument}> is {whence} contract \ + nonce <{contract}>" ); } RelayResult::NoReceipt => { - tracing::warn!( + edisplay_line!( + IO, "No transfer receipt received from the Ethereum node" ); } RelayResult::Receipt { receipt } => { if receipt.is_successful() { - tracing::info!(?receipt, "Ethereum transfer succeeded"); + display_line!( + IO, + "Ethereum transfer succeeded: {:?}", + receipt + ); } else { - tracing::error!(?receipt, "Ethereum transfer failed"); + display_line!( + IO, + "Ethereum transfer failed: {:?}", + receipt + ); } } }, @@ -433,7 +513,9 @@ where time::sleep(sleep_for).await; let is_synchronizing = - eth_sync_or(&*eth_client, || ()).await.is_break(); + eth_sync_or::<_, _, _, DefaultIo>(&*eth_client, || ()) + .await + .is_break(); if is_synchronizing { 
tracing::debug!("The Ethereum node is synchronizing"); last_call_succeeded = false; @@ -441,27 +523,25 @@ where } // we could be racing against governance updates, - // so it is best to always fetch the latest governance + // so it is best to always fetch the latest Bridge // contract address - let governance = - get_governance_contract(nam_client, Arc::clone(ð_client)) - .await - .try_halt(|err| { - // only care about displaying errors, - // exit on all circumstances - _ = err.handle(); - })?; - let governance_epoch_prep_call = governance.validator_set_nonce(); - let governance_epoch_fut = - governance_epoch_prep_call.call().map(|result| { - result - .map_err(|err| { - tracing::error!( - "Failed to fetch latest validator set nonce: {err}" - ); - }) - .map(|e| e.as_u64() as i128) - }); + let bridge = get_bridge_contract(nam_client, Arc::clone(ð_client)) + .await + .try_halt(|err| { + // only care about displaying errors, + // exit on all circumstances + _ = err.handle(); + })?; + let bridge_epoch_prep_call = bridge.validator_set_nonce(); + let bridge_epoch_fut = bridge_epoch_prep_call.call().map(|result| { + result + .map_err(|err| { + tracing::error!( + "Failed to fetch latest validator set nonce: {err}" + ); + }) + .map(|e| e.as_u64() as i128) + }); let shell = RPC.shell(); let nam_current_epoch_fut = shell.epoch(nam_client).map(|result| { @@ -475,7 +555,7 @@ where }); let (nam_current_epoch, gov_current_epoch) = - futures::try_join!(nam_current_epoch_fut, governance_epoch_fut) + futures::try_join!(nam_current_epoch_fut, bridge_epoch_fut) .try_halt(|()| ())?; tracing::debug!( @@ -486,11 +566,11 @@ where let new_epoch = match nam_current_epoch - gov_current_epoch { // NB: a namada epoch should always be one behind the nonce - // in the governance contract, for the latter to be considered + // in the bridge contract, for the latter to be considered // up to date -1 => { tracing::debug!( - "Nothing to do, since the validator set in the Governance \ + "Nothing to do, 
since the validator set in the Bridge \ contract is up to date", ); last_call_succeeded = false; @@ -504,7 +584,7 @@ where // NB: if the nonce difference is lower than 0, somehow the state // of namada managed to fall behind the state of the smart contract _ => { - tracing::error!("The Governance contract is ahead of Namada!"); + tracing::error!("The Bridge contract is ahead of Namada!"); last_call_succeeded = false; continue; } @@ -541,21 +621,21 @@ where } } -async fn get_governance_contract( +async fn get_bridge_contract( nam_client: &C, eth_client: Arc, -) -> Result, Error> +) -> Result, Error> where C: Client + Sync, E: Middleware, { - let governance_contract = RPC + let bridge_contract = RPC .shell() .eth_bridge() - .read_governance_contract(nam_client) + .read_bridge_contract(nam_client) .await .map_err(|err| Error::critical(err.to_string()))?; - Ok(Governance::new(governance_contract.address, eth_client)) + Ok(Bridge::new(bridge_contract.address, eth_client)) } async fn relay_validator_set_update_once( @@ -593,17 +673,17 @@ where let bridge_current_epoch = epoch_to_relay - 1; let shell = RPC.shell().eth_bridge(); - let encoded_validator_set_args_fut = - shell.read_consensus_valset(nam_client, &bridge_current_epoch); + let validator_set_args_fut = + shell.read_bridge_valset(nam_client, &bridge_current_epoch); let shell = RPC.shell().eth_bridge(); - let governance_address_fut = shell.read_governance_contract(nam_client); + let bridge_address_fut = shell.read_bridge_contract(nam_client); - let (encoded_proof, encoded_validator_set_args, governance_contract) = + let (encoded_proof, validator_set_args, bridge_contract) = futures::try_join!( encoded_proof_fut, - encoded_validator_set_args_fut, - governance_address_fut + validator_set_args_fut, + bridge_address_fut ) .map_err(|err| R::try_recover(err.to_string()))?; @@ -612,22 +692,19 @@ where [u8; 32], Vec, ) = abi_decode_struct(encoded_proof); - let consensus_set: ValidatorSetArgs = - 
abi_decode_struct(encoded_validator_set_args); - let governance = Governance::new(governance_contract.address, eth_client); + let bridge = Bridge::new(bridge_contract.address, eth_client); - if let Err(result) = R::should_relay(epoch_to_relay, &governance).await { + if let Err(result) = R::should_relay(epoch_to_relay, &bridge).await { action(result); return Err(Error::NoContext); } - let mut relay_op = governance.update_validators_set( - consensus_set, + let mut relay_op = bridge.update_validator_set( + validator_set_args.into(), bridge_hash, gov_hash, signatures, - epoch_to_relay.0.into(), ); if let Some(gas) = args.gas { relay_op.tx.set_gas(gas); @@ -688,7 +765,7 @@ mod tests { }) .is_successful() ); - assert!(!RelayResult::GovernanceCallError("".into()).is_successful()); + assert!(!RelayResult::BridgeCallError("".into()).is_successful()); assert!( !RelayResult::NonceError { contract: 0.into(), diff --git a/shared/src/ledger/events.rs b/shared/src/ledger/events.rs index fd2b2df3f6b..ff5b9f108d3 100644 --- a/shared/src/ledger/events.rs +++ b/shared/src/ledger/events.rs @@ -11,8 +11,8 @@ use borsh::{BorshDeserialize, BorshSerialize}; use serde_json::Value; use crate::ledger::governance::utils::ProposalEvent; +use crate::sdk::error::{EncodingError, Error, EventError}; use crate::tendermint_proto::abci::EventAttribute; -use crate::types::error::{EncodingError, Error, EventError}; use crate::types::ibc::IbcEvent; #[cfg(feature = "ferveo-tpke")] use crate::types::transaction::TxType; diff --git a/shared/src/ledger/ibc/vp/mod.rs b/shared/src/ledger/ibc/vp/mod.rs index 4e02cb229a5..2d7ebf18f79 100644 --- a/shared/src/ledger/ibc/vp/mod.rs +++ b/shared/src/ledger/ibc/vp/mod.rs @@ -725,7 +725,8 @@ mod tests { outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], - &keypair_1(), + [(0, keypair_1())].into_iter().collect(), + None, ))); let ctx = Ctx::new( &ADDRESS, @@ -1037,7 
+1038,8 @@ mod tests { outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], - &keypair_1(), + [(0, keypair_1())].into_iter().collect(), + None, ))); let gas_meter = VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), @@ -1370,7 +1372,8 @@ mod tests { outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], - &keypair_1(), + [(0, keypair_1())].into_iter().collect(), + None, ))); let gas_meter = VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), @@ -1457,7 +1460,8 @@ mod tests { outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], - &keypair_1(), + [(0, keypair_1())].into_iter().collect(), + None, ))); let gas_meter = VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), @@ -1581,7 +1585,8 @@ mod tests { outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], - &keypair_1(), + [(0, keypair_1())].into_iter().collect(), + None, ))); let gas_meter = VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), @@ -1704,7 +1709,8 @@ mod tests { outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], - &keypair_1(), + [(0, keypair_1())].into_iter().collect(), + None, ))); let gas_meter = VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), @@ -1812,7 +1818,8 @@ mod tests { outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( vec![*outer_tx.code_sechash(), *outer_tx.data_sechash()], - &keypair_1(), + 
[(0, keypair_1())].into_iter().collect(), + None, ))); let gas_meter = VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index a5b2a06819e..04b5809bc27 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -1,25 +1,18 @@ //! The ledger modules -pub mod args; pub mod eth_bridge; pub mod events; pub mod governance; pub mod ibc; pub mod inflation; -pub mod masp; pub mod native_vp; pub mod pgf; pub mod pos; #[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] pub mod protocol; pub mod queries; -pub mod rpc; -pub mod signing; pub mod storage; -#[allow(clippy::result_large_err)] -pub mod tx; pub mod vp_host_fns; -pub mod wallet; pub use namada_core::ledger::{ gas, parameters, replay_protection, storage_api, tx_env, vp_env, diff --git a/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs index 89b1b332536..5efea15ac52 100644 --- a/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs +++ b/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -387,6 +387,7 @@ where escrow_account: &BRIDGE_POOL_ADDRESS, expected_debit: expected_gas_debit, expected_credit: expected_gas_credit, + transferred_amount: &transfer.gas_fee.amount, _kind: PhantomData, }, token_check: EscrowDelta { @@ -395,6 +396,7 @@ where escrow_account: token_check_escrow_acc, expected_debit: expected_token_debit, expected_credit: expected_token_credit, + transferred_amount: &transfer.transfer.amount, _kind: PhantomData, }, }) @@ -409,21 +411,72 @@ struct EscrowDelta<'a, KIND> { escrow_account: &'a Address, expected_debit: Amount, expected_credit: Amount, + transferred_amount: &'a Amount, _kind: PhantomData<*const KIND>, } impl EscrowDelta<'_, KIND> { - fn validate_changed_keys(&self, changed_keys: &BTreeSet) -> bool { + /// Validate an [`EscrowDelta`]. 
+ /// + /// # Conditions for validation + /// + /// If the transferred amount in the [`EscrowDelta`] is nil, + /// then no keys could have been changed. If the transferred + /// amount is greater than zero, then the appropriate escrow + /// keys must have been written to by some wasm tx. + #[inline] + fn validate(&self, changed_keys: &BTreeSet) -> bool { + if hints::unlikely(self.transferred_amount_is_nil()) { + self.check_escrow_keys_unchanged(changed_keys) + } else { + self.check_escrow_keys_changed(changed_keys) + } + } + + /// Check if all required escrow keys in `changed_keys` were modified. + #[inline] + fn check_escrow_keys_changed(&self, changed_keys: &BTreeSet) -> bool { let EscrowDelta { token, payer_account, escrow_account, .. } = self; + let owner_key = balance_key(token, payer_account); let escrow_key = balance_key(token, escrow_account); + changed_keys.contains(&owner_key) && changed_keys.contains(&escrow_key) } + + /// Check if no escrow keys in `changed_keys` were modified. + #[inline] + fn check_escrow_keys_unchanged( + &self, + changed_keys: &BTreeSet, + ) -> bool { + let EscrowDelta { + token, + payer_account, + escrow_account, + .. + } = self; + + let owner_key = balance_key(token, payer_account); + let escrow_key = balance_key(token, escrow_account); + + !changed_keys.contains(&owner_key) + && !changed_keys.contains(&escrow_key) + } + + /// Check if the amount transferred to escrow is nil. + #[inline] + fn transferred_amount_is_nil(&self) -> bool { + let EscrowDelta { + transferred_amount, .. + } = self; + transferred_amount.is_zero() + } } /// There are two checks we must do when minting wNam. 
@@ -437,9 +490,9 @@ struct EscrowCheck<'a> { impl EscrowCheck<'_> { #[inline] - fn validate_changed_keys(&self, changed_keys: &BTreeSet) -> bool { - self.gas_check.validate_changed_keys(changed_keys) - && self.token_check.validate_changed_keys(changed_keys) + fn validate(&self, changed_keys: &BTreeSet) -> bool { + self.gas_check.validate(changed_keys) + && self.token_check.validate(changed_keys) } } @@ -541,7 +594,7 @@ where let wnam_address = read_native_erc20_address(&self.ctx.pre())?; let escrow_checks = self.determine_escrow_checks(&wnam_address, &transfer)?; - if !escrow_checks.validate_changed_keys(keys_changed) { + if !escrow_checks.validate(keys_changed) { tracing::debug!( ?transfer, "Missing storage modifications in the Bridge pool" @@ -860,10 +913,6 @@ mod test_bridge_pool_vp { address: EthAddress([42; 20]), version: Default::default(), }, - governance: UpgradeableContract { - address: EthAddress([18; 20]), - version: Default::default(), - }, }, }; let mut wl_storage = WlStorage { @@ -1849,4 +1898,94 @@ mod test_bridge_pool_vp { Expect::True, ); } + + /// Test that the Bridge pool native VP validates transfers that + /// do not contain gas fees and no associated changed keys. + #[test] + fn test_no_gas_fees_with_no_changed_keys() { + let nam_addr = nam(); + let delta = EscrowDelta { + token: Cow::Borrowed(&nam_addr), + payer_account: &bertha_address(), + escrow_account: &BRIDGE_ADDRESS, + expected_debit: Amount::zero(), + expected_credit: Amount::zero(), + // NOTE: testing 0 amount + transferred_amount: &Amount::zero(), + // NOTE: testing gas fees + _kind: PhantomData::<*const GasCheck>, + }; + // NOTE: testing no changed keys + let empty_keys = BTreeSet::new(); + + assert!(delta.validate(&empty_keys)); + } + + /// Test that the Bridge pool native VP rejects transfers that + /// do not contain gas fees and has associated changed keys. 
+ #[test] + fn test_no_gas_fees_with_changed_keys() { + let nam_addr = nam(); + let delta = EscrowDelta { + token: Cow::Borrowed(&nam_addr), + payer_account: &bertha_address(), + escrow_account: &BRIDGE_ADDRESS, + expected_debit: Amount::zero(), + expected_credit: Amount::zero(), + // NOTE: testing 0 amount + transferred_amount: &Amount::zero(), + // NOTE: testing gas fees + _kind: PhantomData::<*const GasCheck>, + }; + let owner_key = balance_key(&nam_addr, &bertha_address()); + // NOTE: testing changed keys + let some_changed_keys = BTreeSet::from([owner_key]); + + assert!(!delta.validate(&some_changed_keys)); + } + + /// Test that the Bridge pool native VP validates transfers + /// moving no value and with no associated changed keys. + #[test] + fn test_no_amount_with_no_changed_keys() { + let nam_addr = nam(); + let delta = EscrowDelta { + token: Cow::Borrowed(&nam_addr), + payer_account: &bertha_address(), + escrow_account: &BRIDGE_ADDRESS, + expected_debit: Amount::zero(), + expected_credit: Amount::zero(), + // NOTE: testing 0 amount + transferred_amount: &Amount::zero(), + // NOTE: testing token transfers + _kind: PhantomData::<*const TokenCheck>, + }; + // NOTE: testing no changed keys + let empty_keys = BTreeSet::new(); + + assert!(delta.validate(&empty_keys)); + } + + /// Test that the Bridge pool native VP rejects transfers + /// moving no value and with associated changed keys. 
+ #[test] + fn test_no_amount_with_changed_keys() { + let nam_addr = nam(); + let delta = EscrowDelta { + token: Cow::Borrowed(&nam_addr), + payer_account: &bertha_address(), + escrow_account: &BRIDGE_ADDRESS, + expected_debit: Amount::zero(), + expected_credit: Amount::zero(), + // NOTE: testing 0 amount + transferred_amount: &Amount::zero(), + // NOTE: testing token transfers + _kind: PhantomData::<*const TokenCheck>, + }; + let owner_key = balance_key(&nam_addr, &bertha_address()); + // NOTE: testing changed keys + let some_changed_keys = BTreeSet::from([owner_key]); + + assert!(!delta.validate(&some_changed_keys)); + } } diff --git a/shared/src/ledger/native_vp/ethereum_bridge/vp.rs b/shared/src/ledger/native_vp/ethereum_bridge/vp.rs index 650c065ab7b..4d006229a13 100644 --- a/shared/src/ledger/native_vp/ethereum_bridge/vp.rs +++ b/shared/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -236,10 +236,6 @@ mod tests { address: EthAddress([42; 20]), version: Default::default(), }, - governance: UpgradeableContract { - address: EthAddress([18; 20]), - version: Default::default(), - }, }, }; config.init_storage(&mut wl_storage); diff --git a/shared/src/ledger/native_vp/mod.rs b/shared/src/ledger/native_vp/mod.rs index f6dbb82dba7..7fb23373044 100644 --- a/shared/src/ledger/native_vp/mod.rs +++ b/shared/src/ledger/native_vp/mod.rs @@ -496,8 +496,6 @@ where self.keys_changed, &eval_runner, &mut vp_wasm_cache, - #[cfg(not(feature = "mainnet"))] - false, ); match eval_runner.eval_native_result(ctx, vp_code_hash, input_data) { diff --git a/shared/src/ledger/native_vp/multitoken.rs b/shared/src/ledger/native_vp/multitoken.rs index 0d42e86eaa7..564024fd8f3 100644 --- a/shared/src/ledger/native_vp/multitoken.rs +++ b/shared/src/ledger/native_vp/multitoken.rs @@ -171,7 +171,8 @@ mod tests { tx.set_data(Data::new(tx_data)); tx.add_section(Section::Signature(Signature::new( tx.sechashes(), - &keypair_1(), + [(0, keypair_1())].into_iter().collect(), + None, ))); tx } diff --git 
a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index c6ed0814b17..ceb41a36afa 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -147,7 +147,6 @@ pub fn dispatch_tx<'a, D, H, CA>( vp_wasm_cache: &'a mut VpCache, tx_wasm_cache: &'a mut TxCache, block_proposer: Option<&'a Address>, - #[cfg(not(feature = "mainnet"))] has_valid_pow: bool, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -156,10 +155,7 @@ where { match tx.header().tx_type { TxType::Raw => Err(Error::TxTypeError), - TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow, - }) => apply_wasm_tx( + TxType::Decrypted(DecryptedTx::Decrypted) => apply_wasm_tx( tx, &tx_index, ShellParams { @@ -168,27 +164,14 @@ where vp_wasm_cache, tx_wasm_cache, }, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, ), TxType::Protocol(protocol_tx) => { apply_protocol_tx(protocol_tx.tx, tx.data(), wl_storage) } TxType::Wrapper(ref wrapper) => { - let masp_transaction = - wrapper.unshield_section_hash.and_then(|ref hash| { - tx.get_section(hash).and_then(|section| { - if let Section::MaspTx(transaction) = section.as_ref() { - Some(transaction.to_owned()) - } else { - None - } - }) - }); - let changed_keys = apply_wrapper_tx( wrapper, - masp_transaction, + get_fee_unshielding_transaction(&tx, wrapper), tx_bytes, ShellParams { tx_gas_meter, @@ -197,8 +180,6 @@ where tx_wasm_cache, }, block_proposer, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, )?; Ok(TxResult { gas_used: tx_gas_meter.get_tx_consumed_gas(), @@ -242,7 +223,6 @@ pub(crate) fn apply_wrapper_tx<'a, D, H, CA, WLS>( tx_bytes: &[u8], mut shell_params: ShellParams<'a, CA, WLS>, block_proposer: Option<&Address>, - #[cfg(not(feature = "mainnet"))] has_valid_pow: bool, ) -> Result> where CA: 'static + WasmCacheAccess + Sync, @@ -269,8 +249,6 @@ where wrapper, fee_unshield_transaction, &mut shell_params, - #[cfg(not(feature = "mainnet"))] - 
has_valid_pow, block_proposer, &mut changed_keys, )?; @@ -291,6 +269,24 @@ where Ok(changed_keys) } +/// Retrieve the Masp `Transaction` for fee unshielding from the provided +/// transaction, if present +pub fn get_fee_unshielding_transaction( + tx: &Tx, + wrapper: &WrapperTx, +) -> Option { + wrapper + .unshield_section_hash + .and_then(|ref hash| tx.get_section(hash)) + .and_then(|section| { + if let Section::MaspTx(transaction) = section.as_ref() { + Some(transaction.to_owned()) + } else { + None + } + }) +} + /// Charge fee for the provided wrapper transaction. In ABCI returns an error if /// the balance of the block proposer overflows. In ABCI plus returns error if: /// - The unshielding fails @@ -301,7 +297,6 @@ pub fn charge_fee<'a, D, H, CA, WLS>( wrapper: &WrapperTx, masp_transaction: Option, shell_params: &mut ShellParams<'a, CA, WLS>, - #[cfg(not(feature = "mainnet"))] has_valid_pow: bool, block_proposer: Option<&Address>, changed_keys: &mut BTreeSet, ) -> Result<()> @@ -354,8 +349,6 @@ where vp_wasm_cache, tx_wasm_cache, }, - #[cfg(not(feature = "mainnet"))] - false, ) { Ok(result) => { // NOTE: do not commit yet cause this could be @@ -385,19 +378,8 @@ where // Charge or check fees match block_proposer { - Some(proposer) => transfer_fee( - *wl_storage, - proposer, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, - wrapper, - )?, - None => check_fees( - *wl_storage, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, - wrapper, - )?, + Some(proposer) => transfer_fee(*wl_storage, proposer, wrapper)?, + None => check_fees(*wl_storage, wrapper)?, } changed_keys.extend(wl_storage.write_log_mut().get_keys_with_precommit()); @@ -413,7 +395,6 @@ where pub fn transfer_fee( wl_storage: &mut WLS, block_proposer: &Address, - #[cfg(not(feature = "mainnet"))] has_valid_pow: bool, wrapper: &WrapperTx, ) -> Result<()> where @@ -439,45 +420,31 @@ where .map_err(|e| Error::FeeError(e.to_string())) } else { // Balance was insufficient for fee payment - 
#[cfg(not(feature = "mainnet"))] - let reject = !has_valid_pow; - #[cfg(feature = "mainnet")] - let reject = true; - - if reject { - #[cfg(not(any(feature = "abciplus", feature = "abcipp")))] - { - // Move all the available funds in the transparent - // balance of the fee payer - token_transfer( - wl_storage, - &wrapper.fee.token, - &wrapper.fee_payer(), - block_proposer, - balance, - ) - .map_err(|e| Error::FeeError(e.to_string()))?; + #[cfg(not(any(feature = "abciplus", feature = "abcipp")))] + { + // Move all the available funds in the transparent + // balance of the fee payer + token_transfer( + wl_storage, + &wrapper.fee.token, + &wrapper.fee_payer(), + block_proposer, + balance, + ) + .map_err(|e| Error::FeeError(e.to_string()))?; - return Err(Error::FeeError( - "Transparent balance of wrapper's signer was \ - insufficient to pay fee. All the available \ - transparent funds have been moved to the block \ - proposer" - .to_string(), - )); - } - #[cfg(any(feature = "abciplus", feature = "abcipp"))] return Err(Error::FeeError( - "Insufficient transparent balance to pay fees" + "Transparent balance of wrapper's signer was \ + insufficient to pay fee. 
All the available \ + transparent funds have been moved to the block \ + proposer" .to_string(), )); - } else { - tracing::debug!( - "Balance was insufficient for fee payment but a valid \ - PoW was provided" - ); - Ok(()) } + #[cfg(any(feature = "abciplus", feature = "abcipp"))] + return Err(Error::FeeError( + "Insufficient transparent balance to pay fees".to_string(), + )); } } Err(e) => { @@ -564,11 +531,7 @@ where } /// Check if the fee payer has enough transparent balance to pay fees -pub fn check_fees( - wl_storage: &WLS, - #[cfg(not(feature = "mainnet"))] has_valid_pow: bool, - wrapper: &WrapperTx, -) -> Result<()> +pub fn check_fees(wl_storage: &WLS, wrapper: &WrapperTx) -> Result<()> where WLS: WriteLogAndStorage + StorageRead, { @@ -586,23 +549,9 @@ where if balance.checked_sub(fees).is_some() { Ok(()) } else { - // Balance was insufficient for fee payment - #[cfg(not(feature = "mainnet"))] - let reject = !has_valid_pow; - #[cfg(feature = "mainnet")] - let reject = true; - - if reject { - Err(Error::FeeError( - "Insufficient transparent balance to pay fees".to_string(), - )) - } else { - tracing::debug!( - "Balance was insufficient for fee payment but a valid PoW was \ - provided" - ); - Ok(()) - } + Err(Error::FeeError( + "Insufficient transparent balance to pay fees".to_string(), + )) } } @@ -612,7 +561,6 @@ pub fn apply_wasm_tx<'a, D, H, CA, WLS>( tx: Tx, tx_index: &TxIndex, shell_params: ShellParams<'a, CA, WLS>, - #[cfg(not(feature = "mainnet"))] has_valid_pow: bool, ) -> Result where CA: 'static + WasmCacheAccess + Sync, @@ -656,8 +604,6 @@ where write_log, verifiers_from_tx: &verifiers, vp_wasm_cache, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, })?; let gas_used = tx_gas_meter.get_tx_consumed_gas(); @@ -798,8 +744,6 @@ where write_log: &'a WriteLog, verifiers_from_tx: &'a BTreeSet
, vp_wasm_cache: &'a mut VpCache, - #[cfg(not(feature = "mainnet"))] - has_valid_pow: bool, } /// Check the acceptance of a transaction by validity predicates @@ -812,8 +756,6 @@ fn check_vps( write_log, verifiers_from_tx, vp_wasm_cache, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, }: CheckVps<'_, D, H, CA>, ) -> Result where @@ -833,7 +775,6 @@ where write_log, tx_gas_meter, vp_wasm_cache, - has_valid_pow, )?; tracing::debug!("Total VPs gas cost {:?}", vps_result.gas_used); @@ -853,10 +794,6 @@ fn execute_vps( write_log: &WriteLog, tx_gas_meter: &TxGasMeter, vp_wasm_cache: &mut VpCache, - #[cfg(not(feature = "mainnet"))] - // This is true when the wrapper of this tx contained a valid - // `testnet_pow::Solution` - has_valid_pow: bool, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -882,7 +819,7 @@ where // the first signature verification (if any) is accounted // twice wasm::run::vp( - &vp_code_hash, + vp_code_hash, tx, tx_index, addr, @@ -892,8 +829,6 @@ where &keys_changed, &verifiers, vp_wasm_cache.clone(), - #[cfg(not(feature = "mainnet"))] - has_valid_pow, ) .map_err(Error::VpRunnerError) } @@ -1128,10 +1063,13 @@ mod tests { fn test_apply_protocol_tx_duplicate_eth_events_vext() -> Result<()> { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); + let validator_a_stake = Amount::native_whole(100); + let validator_b_stake = Amount::native_whole(100); + let total_stake = validator_a_stake + validator_b_stake; let (mut wl_storage, _) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ - (validator_a.clone(), Amount::native_whole(100)), - (validator_b, Amount::native_whole(100)), + (validator_a.clone(), validator_a_stake), + (validator_b, validator_b_stake), ]), ); let event = EthereumEvent::TransfersToNamada { @@ -1141,7 +1079,6 @@ mod tests { asset: DAI_ERC20_ETH_ADDRESS, receiver: address::testing::established_address_4(), }], - 
valid_transfers_map: vec![true], }; let vext = EthereumEventsVext { block_height: BlockHeight(100), @@ -1166,8 +1103,10 @@ mod tests { // the vote should have only be applied once let voting_power: EpochedVotingPower = wl_storage.read(ð_msg_keys.voting_power())?.unwrap(); - let expected = - EpochedVotingPower::from([(0.into(), FractionalVotingPower::HALF)]); + let expected = EpochedVotingPower::from([( + 0.into(), + FractionalVotingPower::HALF * total_stake, + )]); assert_eq!(voting_power, expected); Ok(()) @@ -1180,10 +1119,13 @@ mod tests { fn test_apply_protocol_tx_duplicate_bp_roots_vext() -> Result<()> { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); + let validator_a_stake = Amount::native_whole(100); + let validator_b_stake = Amount::native_whole(100); + let total_stake = validator_a_stake + validator_b_stake; let (mut wl_storage, keys) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ - (validator_a.clone(), Amount::native_whole(100)), - (validator_b, Amount::native_whole(100)), + (validator_a.clone(), validator_a_stake), + (validator_b, validator_b_stake), ]), ); bridge_pool_vp::init_storage(&mut wl_storage); @@ -1210,9 +1152,10 @@ mod tests { apply_eth_tx(tx.clone(), &mut wl_storage)?; apply_eth_tx(tx, &mut wl_storage)?; - let bp_root_keys = vote_tallies::Keys::from( - vote_tallies::BridgePoolRoot(EthereumProof::new((root, nonce))), - ); + let bp_root_keys = vote_tallies::Keys::from(( + &vote_tallies::BridgePoolRoot(EthereumProof::new((root, nonce))), + 100.into(), + )); let root_seen_by_bytes = wl_storage.read_bytes(&bp_root_keys.seen_by())?; assert_eq!( @@ -1222,8 +1165,10 @@ mod tests { // the vote should have only be applied once let voting_power: EpochedVotingPower = wl_storage.read(&bp_root_keys.voting_power())?.unwrap(); - let expected = - EpochedVotingPower::from([(0.into(), FractionalVotingPower::HALF)]); + let expected = EpochedVotingPower::from([( + 
0.into(), + FractionalVotingPower::HALF * total_stake, + )]); assert_eq!(voting_power, expected); Ok(()) diff --git a/shared/src/ledger/queries/mod.rs b/shared/src/ledger/queries/mod.rs index aa87ce29036..dd42680a2e9 100644 --- a/shared/src/ledger/queries/mod.rs +++ b/shared/src/ledger/queries/mod.rs @@ -4,8 +4,6 @@ // Re-export to show in rustdoc! pub use shell::Shell; use shell::SHELL; -#[cfg(any(test, feature = "async-client"))] -pub use types::Client; pub use types::{ EncodedResponseQuery, Error, RequestCtx, RequestQuery, ResponseQuery, Router, @@ -14,10 +12,13 @@ use vp::{Vp, VP}; pub use self::shell::eth_bridge::{ Erc20FlowControl, GenBridgePoolProofReq, GenBridgePoolProofRsp, + TransferToErcArgs, }; use super::storage::traits::StorageHasher; use super::storage::{DBIter, DB}; use super::storage_api; +#[cfg(any(test, feature = "async-client"))] +pub use crate::sdk::queries::Client; use crate::types::storage::BlockHeight; #[macro_use] diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs index 2db40f0a072..a766846916a 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/shared/src/ledger/queries/shell.rs @@ -132,8 +132,6 @@ where &mut ctx.tx_wasm_cache, ), None, - #[cfg(not(feature = "mainnet"))] - false, ) .into_storage_result()?; @@ -144,12 +142,7 @@ where // hardcoded, dummy one let _privkey = ::G2Affine::prime_subgroup_generator(); - tx.update_header(TxType::Decrypted( - DecryptedTx::Decrypted { #[cfg(not(feature = "mainnet"))] - // To be able to dry-run testnet faucet withdrawal, pretend - // that we got a valid PoW - has_valid_pow: true }, - )); + tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); TxGasMeter::new_from_sub_limit(tx_gas_meter.get_available_gas()) } TxType::Protocol(_) | TxType::Decrypted(_) => { @@ -163,10 +156,7 @@ where } TxType::Raw => { // Cast tx to a decrypted for execution - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - has_valid_pow: true, - 
})); + tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); // If dry run only the inner tx, use the max block gas as the gas // limit @@ -187,8 +177,6 @@ where &mut ctx.vp_wasm_cache, &mut ctx.tx_wasm_cache, ), - #[cfg(not(feature = "mainnet"))] - true, ) .into_storage_result()?; cumulated_gas = cumulated_gas @@ -631,12 +619,7 @@ mod test { // Request dry run tx let mut outer_tx = - Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted { - #[cfg(not(feature = "mainnet"))] - // To be able to dry-run testnet faucet withdrawal, pretend - // that we got a valid PoW - has_valid_pow: true, - })); + Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); outer_tx.header.chain_id = client.wl_storage.storage.chain_id.clone(); outer_tx.set_code(Code::from_hash(tx_hash)); outer_tx.set_data(Data::new(vec![])); diff --git a/shared/src/ledger/queries/shell/eth_bridge.rs b/shared/src/ledger/queries/shell/eth_bridge.rs index 2588ef33bd5..0bbc0aa679d 100644 --- a/shared/src/ledger/queries/shell/eth_bridge.rs +++ b/shared/src/ledger/queries/shell/eth_bridge.rs @@ -16,7 +16,7 @@ use namada_core::types::eth_bridge_pool::PendingTransferAppendix; use namada_core::types::ethereum_events::{ EthAddress, EthereumEvent, TransferToEthereum, }; -use namada_core::types::ethereum_structs::RelayProof; +use namada_core::types::ethereum_structs; use namada_core::types::storage::{BlockHeight, DbKeySeg, Key}; use namada_core::types::token::Amount; use namada_core::types::vote_extensions::validator_set_update::{ @@ -31,8 +31,7 @@ use namada_ethereum_bridge::storage::eth_bridge_queries::EthBridgeQueries; use namada_ethereum_bridge::storage::proof::{sort_sigs, EthereumProof}; use namada_ethereum_bridge::storage::vote_tallies::{eth_msgs_prefix, Keys}; use namada_ethereum_bridge::storage::{ - bridge_contract_key, governance_contract_key, native_erc20_key, - vote_tallies, + bridge_contract_key, native_erc20_key, vote_tallies, }; use namada_proof_of_stake::pos_queries::PosQueries; @@ -69,11 
+68,18 @@ pub struct GenBridgePoolProofReq<'transfers, 'relayer> { pub with_appendix: bool, } +/// Arguments to pass to `transfer_to_erc`. +pub type TransferToErcArgs = ( + ethereum_structs::ValidatorSetArgs, + Vec, + ethereum_structs::RelayProof, +); + /// Response data returned by `generate_bridge_pool_proof`. #[derive(Debug, Clone, Eq, PartialEq, BorshSerialize, BorshDeserialize)] pub struct GenBridgePoolProofRsp { - /// Ethereum ABI encoded [`RelayProof`]. - pub abi_encoded_proof: Vec, + /// Ethereum ABI encoded arguments to pass to `transfer_to_erc`. + pub abi_encoded_args: Vec, /// Appendix data of all requested pending transfers. pub appendices: Option>>, } @@ -81,9 +87,9 @@ pub struct GenBridgePoolProofRsp { impl GenBridgePoolProofRsp { /// Retrieve all [`PendingTransfer`] instances returned from the RPC server. pub fn pending_transfers(self) -> impl Iterator { - RelayProof::decode(&self.abi_encoded_proof) + TransferToErcArgs::decode(&self.abi_encoded_args) .into_iter() - .flat_map(|proof| proof.transfers) + .flat_map(|(_, _, proof)| proof.transfers) .zip(self.appendices.into_iter().flatten()) .map(|(event, appendix)| { let event: TransferToEthereum = event.into(); @@ -121,16 +127,17 @@ router! {ETH_BRIDGE, -> EncodeCell> = read_valset_upd_proof, - // Request the set of consensus validator at the given epoch. + // Request the set of bridge validators at the given epoch. // // The request may fail if no validator set exists at that epoch. - ( "validator_set" / "consensus" / [epoch: Epoch] ) - -> EncodeCell = read_consensus_valset, + ( "validator_set" / "bridge" / [epoch: Epoch] ) + -> ValidatorSetArgs = read_bridge_valset, - // Read the address and version of the Ethereum bridge's Governance - // smart contract. - ( "contracts" / "governance" ) - -> UpgradeableContract = read_governance_contract, + // Request the set of governance validators at the given epoch. + // + // The request may fail if no validator set exists at that epoch. 
+ ( "validator_set" / "governance" / [epoch: Epoch] ) + -> ValidatorSetArgs = read_governance_valset, // Read the address and version of the Ethereum bridge's Bridge // smart contract. @@ -202,19 +209,6 @@ where Ok(contract) } -/// Read the address and version of the Ethereum bridge's Governance -/// smart contract. -#[inline] -fn read_governance_contract( - ctx: RequestCtx<'_, D, H>, -) -> storage_api::Result -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - read_contract(&governance_contract_key(), ctx) -} - /// Read the address and version of the Ethereum bridge's Bridge /// smart contract. #[inline] @@ -410,13 +404,8 @@ where let (validator_args, voting_powers) = ctx .wl_storage .ethbridge_queries() - .get_validator_set_args(None); - let relay_proof = RelayProof { - validator_set_args: validator_args.into(), - signatures: sort_sigs( - &voting_powers, - &signed_root.signatures, - ), + .get_bridge_validator_set(None); + let relay_proof = ethereum_structs::RelayProof { transfers, pool_root: signed_root.data.0.0, proof: proof.proof.into_iter().map(|hash| hash.0).collect(), @@ -424,10 +413,16 @@ where batch_nonce: signed_root.data.1.into(), relayer_address: relayer.to_string(), }; + let validator_set: ethereum_structs::ValidatorSetArgs = + validator_args.into(); + let signatures = + sort_sigs(&voting_powers, &signed_root.signatures); let rsp = GenBridgePoolProofRsp { - abi_encoded_proof: ethers::abi::AbiEncode::encode( + abi_encoded_args: ethers::abi::AbiEncode::encode(( + validator_set, + signatures, relay_proof, - ), + )), appendices: with_appendix.then_some(appendices), }; let data = rsp.try_to_vec().into_storage_result()?; @@ -504,7 +499,7 @@ where "Iterating over storage should not yield keys without \ values.", ) - .average_voting_power(ctx.wl_storage); + .fractional_stake(ctx.wl_storage); for transfer in transfers { let key = get_key_from_hash(&transfer.keccak256()); let transfer = ctx @@ -569,14 +564,44 @@ 
where Ok(proof.map(|set| (epoch, set)).encode()) } -/// Read the consensus set of validators at the given [`Epoch`]. +/// Request the set of bridge validators at the given epoch. +/// +/// This method may fail if no set of validators exists yet, +/// at that [`Epoch`]. +fn read_bridge_valset( + ctx: RequestCtx<'_, D, H>, + epoch: Epoch, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let current_epoch = ctx.wl_storage.storage.last_epoch; + if epoch > current_epoch.next() { + Err(storage_api::Error::Custom(CustomError( + format!( + "Requesting Bridge validator set at {epoch:?}, but the last \ + installed epoch is still {current_epoch:?}" + ) + .into(), + ))) + } else { + Ok(ctx + .wl_storage + .ethbridge_queries() + .get_bridge_validator_set(Some(epoch)) + .0) + } +} + +/// Request the set of governance validators at the given epoch. /// /// This method may fail if no set of validators exists yet, /// at that [`Epoch`]. 
-fn read_consensus_valset( +fn read_governance_valset( ctx: RequestCtx<'_, D, H>, epoch: Epoch, -) -> storage_api::Result> +) -> storage_api::Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -585,7 +610,7 @@ where if epoch > current_epoch.next() { Err(storage_api::Error::Custom(CustomError( format!( - "Requesting consensus validator set at {epoch:?}, but the \ + "Requesting Governance validator set at {epoch:?}, but the \ last installed epoch is still {current_epoch:?}" ) .into(), @@ -594,9 +619,8 @@ where Ok(ctx .wl_storage .ethbridge_queries() - .get_validator_set_args(Some(epoch)) - .0 - .encode()) + .get_governance_validator_set(Some(epoch)) + .0) } } @@ -638,7 +662,7 @@ where let (_, voting_powers) = ctx .wl_storage .ethbridge_queries() - .get_validator_set_args(Some(epoch)); + .get_bridge_validator_set(Some(epoch)); Ok(voting_powers) } @@ -678,7 +702,7 @@ mod test_ethbridge_router { }; use crate::types::ethereum_events::EthAddress; - /// Test that reading the consensus validator set works. + /// Test that reading the bridge validator set works. 
#[tokio::test] async fn test_read_consensus_valset() { let mut client = TestClient::new(RPC); @@ -699,7 +723,7 @@ mod test_ethbridge_router { let validator_set = RPC .shell() .eth_bridge() - .read_consensus_valset(&client, &epoch) + .read_bridge_valset(&client, &epoch) .await .unwrap(); let expected = { @@ -733,7 +757,6 @@ mod test_ethbridge_router { validators, voting_powers, } - .encode() }; assert_eq!(validator_set, expected); @@ -760,7 +783,7 @@ mod test_ethbridge_router { let result = RPC .shell() .eth_bridge() - .read_consensus_valset(&client, &Epoch(999_999)) + .read_bridge_valset(&client, &Epoch(999_999)) .await; let Err(err) = result else { panic!("Test failed"); @@ -1071,10 +1094,8 @@ mod test_ethbridge_router { let (validator_args, voting_powers) = client .wl_storage .ethbridge_queries() - .get_validator_set_args(None); - let data = RelayProof { - validator_set_args: validator_args.into(), - signatures: sort_sigs(&voting_powers, &signed_root.signatures), + .get_bridge_validator_set(None); + let relay_proof = ethereum_structs::RelayProof { transfers: vec![(&transfer).into()], pool_root: signed_root.data.0.0, proof: proof.proof.into_iter().map(|hash| hash.0).collect(), @@ -1082,8 +1103,15 @@ mod test_ethbridge_router { batch_nonce: Default::default(), relayer_address: bertha_address().to_string(), }; - let proof = ethers::abi::AbiEncode::encode(data); - assert_eq!(proof, resp.data.abi_encoded_proof); + let signatures = sort_sigs(&voting_powers, &signed_root.signatures); + let validator_set: ethereum_structs::ValidatorSetArgs = + validator_args.into(); + let encoded = ethers::abi::AbiEncode::encode(( + validator_set, + signatures, + relay_proof, + )); + assert_eq!(encoded, resp.data.abi_encoded_args); } /// Test if the merkle tree including a transfer has not had its @@ -1275,6 +1303,7 @@ mod test_ethbridge_router { }, }; // write validator to storage + let (_, dummy_validator_stake) = test_utils::default_validator(); test_utils::init_default_storage(&mut 
client.wl_storage); // write a transfer into the bridge pool @@ -1291,7 +1320,6 @@ mod test_ethbridge_router { let eth_event = EthereumEvent::TransfersToEthereum { nonce: Default::default(), transfers: vec![event_transfer.clone()], - valid_transfers_map: vec![true], relayer: bertha_address(), }; let eth_msg_key = vote_tallies::Keys::from(ð_event); @@ -1307,9 +1335,12 @@ mod test_ethbridge_router { .wl_storage .write_bytes( ð_msg_key.voting_power(), - EpochedVotingPower::from([(0.into(), voting_power)]) - .try_to_vec() - .expect("Test failed"), + EpochedVotingPower::from([( + 0.into(), + voting_power * dummy_validator_stake, + )]) + .try_to_vec() + .expect("Test failed"), ) .expect("Test failed"); client diff --git a/shared/src/ledger/queries/types.rs b/shared/src/ledger/queries/types.rs index 78f136cad27..235bf76e992 100644 --- a/shared/src/ledger/queries/types.rs +++ b/shared/src/ledger/queries/types.rs @@ -1,4 +1,4 @@ -use std::fmt::{Debug, Display}; +use std::fmt::Debug; use namada_core::ledger::storage::WlStorage; use thiserror::Error; @@ -6,15 +6,7 @@ use thiserror::Error; use crate::ledger::events::log::EventLog; use crate::ledger::storage::{DBIter, StorageHasher, DB}; use crate::ledger::storage_api; -use crate::tendermint::block::Height; use crate::tendermint::merkle::proof::Proof; -use crate::tendermint_rpc::endpoint::{ - abci_info, block, block_results, blockchain, commit, consensus_params, - consensus_state, health, net_info, status, -}; -use crate::tendermint_rpc::error::Error as RpcError; -use crate::tendermint_rpc::query::Query; -use crate::tendermint_rpc::Order; use crate::types::storage::BlockHeight; #[cfg(feature = "wasm-runtime")] use crate::vm::wasm::{TxCache, VpCache}; @@ -78,221 +70,6 @@ pub trait Router { H: 'static + StorageHasher + Sync; } -/// A client with async request dispatcher method, which can be used to invoke -/// type-safe methods from a root [`Router`], generated via `router!` macro. 
-#[cfg(any(test, feature = "async-client"))] -#[async_trait::async_trait(?Send)] -pub trait Client { - /// `std::io::Error` can happen in decoding with - /// `BorshDeserialize::try_from_slice` - type Error: From + Display + Debug; - - /// Send a simple query request at the given path. For more options, use the - /// `request` method. - async fn simple_request( - &self, - path: String, - ) -> Result, Self::Error> { - self.request(path, None, None, false) - .await - .map(|response| response.data) - } - - /// Send a query request at the given path. - async fn request( - &self, - path: String, - data: Option>, - height: Option, - prove: bool, - ) -> Result; - - /// `/abci_info`: get information about the ABCI application. - async fn abci_info(&self) -> Result { - Ok(self.perform(abci_info::Request).await?.response) - } - - /// `/broadcast_tx_sync`: broadcast a transaction, returning the response - /// from `CheckTx`. - async fn broadcast_tx_sync( - &self, - tx: crate::tendermint::abci::Transaction, - ) -> Result - { - self.perform( - tendermint_rpc::endpoint::broadcast::tx_sync::Request::new(tx), - ) - .await - } - - /// `/block`: get the latest block. - async fn latest_block(&self) -> Result { - self.perform(block::Request::default()).await - } - - /// `/block`: get block at a given height. - async fn block(&self, height: H) -> Result - where - H: Into + Send, - { - self.perform(block::Request::new(height.into())).await - } - - /// `/block_search`: search for blocks by BeginBlock and EndBlock events. - async fn block_search( - &self, - query: Query, - page: u32, - per_page: u8, - order: Order, - ) -> Result - { - self.perform(tendermint_rpc::endpoint::block_search::Request::new( - query, page, per_page, order, - )) - .await - } - - /// `/block_results`: get ABCI results for a block at a particular height. 
- async fn block_results( - &self, - height: H, - ) -> Result - where - H: Into + Send, - { - self.perform(tendermint_rpc::endpoint::block_results::Request::new( - height.into(), - )) - .await - } - - /// `/tx_search`: search for transactions with their results. - async fn tx_search( - &self, - query: Query, - prove: bool, - page: u32, - per_page: u8, - order: Order, - ) -> Result { - self.perform(tendermint_rpc::endpoint::tx_search::Request::new( - query, prove, page, per_page, order, - )) - .await - } - - /// `/abci_query`: query the ABCI application - async fn abci_query( - &self, - path: Option, - data: V, - height: Option, - prove: bool, - ) -> Result - where - V: Into> + Send, - { - Ok(self - .perform(tendermint_rpc::endpoint::abci_query::Request::new( - path, data, height, prove, - )) - .await? - .response) - } - - /// `/block_results`: get ABCI results for the latest block. - async fn latest_block_results( - &self, - ) -> Result { - self.perform(block_results::Request::default()).await - } - - /// `/blockchain`: get block headers for `min` <= `height` <= `max`. - /// - /// Block headers are returned in descending order (highest first). - /// - /// Returns at most 20 items. - async fn blockchain( - &self, - min: H, - max: H, - ) -> Result - where - H: Into + Send, - { - // TODO(tarcieri): return errors for invalid params before making - // request? - self.perform(blockchain::Request::new(min.into(), max.into())) - .await - } - - /// `/commit`: get block commit at a given height. - async fn commit(&self, height: H) -> Result - where - H: Into + Send, - { - self.perform(commit::Request::new(height.into())).await - } - - /// `/consensus_params`: get current consensus parameters at the specified - /// height. 
- async fn consensus_params( - &self, - height: H, - ) -> Result - where - H: Into + Send, - { - self.perform(consensus_params::Request::new(Some(height.into()))) - .await - } - - /// `/consensus_state`: get current consensus state - async fn consensus_state( - &self, - ) -> Result { - self.perform(consensus_state::Request::new()).await - } - - /// `/consensus_params`: get the latest consensus parameters. - async fn latest_consensus_params( - &self, - ) -> Result { - self.perform(consensus_params::Request::new(None)).await - } - - /// `/commit`: get the latest block commit - async fn latest_commit(&self) -> Result { - self.perform(commit::Request::default()).await - } - - /// `/health`: get node health. - /// - /// Returns empty result (200 OK) on success, no response in case of an - /// error. - async fn health(&self) -> Result<(), RpcError> { - self.perform(health::Request).await?; - Ok(()) - } - - /// `/net_info`: obtain information about P2P and other network connections. - async fn net_info(&self) -> Result { - self.perform(net_info::Request).await - } - - /// `/status`: get Tendermint status including node info, pubkey, latest - /// block hash, app hash, block height and time. 
- async fn status(&self) -> Result { - self.perform(status::Request).await - } - - /// Perform a request against the RPC endpoint - async fn perform(&self, request: R) -> Result - where - R: tendermint_rpc::SimpleRequest; -} - #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { @@ -306,52 +83,6 @@ pub enum Error { InvalidHeight(BlockHeight), } -#[async_trait::async_trait(?Send)] -impl Client for C { - type Error = Error; - - async fn request( - &self, - path: String, - data: Option>, - height: Option, - prove: bool, - ) -> Result { - let data = data.unwrap_or_default(); - let height = height - .map(|height| { - crate::tendermint::block::Height::try_from(height.0) - .map_err(|_err| Error::InvalidHeight(height)) - }) - .transpose()?; - let response = self - .abci_query( - // TODO open the private Path constructor in tendermint-rpc - Some(std::str::FromStr::from_str(&path).unwrap()), - data, - height, - prove, - ) - .await?; - use crate::tendermint::abci::Code; - match response.code { - Code::Ok => Ok(EncodedResponseQuery { - data: response.value, - info: response.info, - proof: response.proof, - }), - Code::Err(code) => Err(Error::Query(response.info, code)), - } - } - - async fn perform(&self, request: R) -> Result - where - R: tendermint_rpc::SimpleRequest, - { - tendermint_rpc::Client::perform(self, request).await - } -} - /// Temporary domain-type for `tendermint_proto::abci::RequestQuery`, copied /// from /// until we are on a branch that has it included. diff --git a/shared/src/ledger/queries/vp/mod.rs b/shared/src/ledger/queries/vp/mod.rs index c53386b2f8f..48e39b5c8b1 100644 --- a/shared/src/ledger/queries/vp/mod.rs +++ b/shared/src/ledger/queries/vp/mod.rs @@ -22,135 +22,3 @@ router! {VP, ( "governance" ) = (sub GOV), ( "pgf" ) = (sub PGF), } - -/// Client-only methods for the router type are composed from router functions. 
-#[cfg(any(test, feature = "async-client"))] -pub mod client_only_methods { - #[cfg(not(feature = "mainnet"))] - use borsh::BorshDeserialize; - #[cfg(not(feature = "mainnet"))] - use namada_core::ledger::testnet_pow; - - use super::Vp; - #[cfg(not(feature = "mainnet"))] - use crate::ledger::queries::{Client, RPC}; - #[cfg(not(feature = "mainnet"))] - use crate::types::address::Address; - - impl Vp { - #[cfg(not(feature = "mainnet"))] - /// Get faucet account address, if any is setup for the network. - pub async fn get_faucet_address( - &self, - client: &CLIENT, - ) -> Result, ::Error> - where - CLIENT: Client + Sync, - { - let faucet_account_key = namada_core::ledger::parameters::storage::get_faucet_account_key(); - if RPC - .shell() - .storage_has_key(client, &faucet_account_key) - .await? - { - let faucet_account = Address::try_from_slice( - &RPC.shell() - .storage_value( - client, - None, - None, - false, - &faucet_account_key, - ) - .await? - .data, - ) - .expect("Faucet address couldn't be read"); - Ok(Some(faucet_account)) - } else { - Ok(None) - } - } - - #[cfg(not(feature = "mainnet"))] - /// Check if the given address is a faucet account address. - pub async fn is_faucet( - &self, - client: &CLIENT, - address: &Address, - ) -> Result::Error> - where - CLIENT: Client + Sync, - { - if let Some(faucet_address) = - self.get_faucet_address(client).await? - { - Ok(address == &faucet_address) - } else { - Ok(false) - } - } - - #[cfg(not(feature = "mainnet"))] - /// Get a faucet PoW challenge for token withdrawal. - pub async fn testnet_pow_challenge( - &self, - client: &CLIENT, - source: Address, - ) -> Result::Error> - where - CLIENT: Client + Sync, - { - let params = self.testnet_pow_params(client, &source).await?; - Ok(testnet_pow::Challenge { source, params }) - } - - #[cfg(not(feature = "mainnet"))] - /// Read faucet PoW challenge parameters for token withdrawal. 
- pub async fn testnet_pow_params( - &self, - client: &CLIENT, - source: &Address, - ) -> Result::Error> - where - CLIENT: Client + Sync, - { - let faucet_address = self - .get_faucet_address(client) - .await? - .expect("No faucet account found"); - let difficulty_key = &testnet_pow::difficulty_key(&faucet_address); - let counter_key = &testnet_pow::counters_handle(&faucet_address) - .get_data_key(source); - let difficulty = testnet_pow::Difficulty::try_from_slice( - &RPC.shell() - .storage_value(client, None, None, false, difficulty_key) - .await? - .data, - ) - .expect("Faucet PoW difficulty couldn't be read"); - let counter = if RPC - .shell() - .storage_has_key(client, counter_key) - .await? - { - testnet_pow::Counter::try_from_slice( - &RPC.shell() - .storage_value(client, None, None, false, counter_key) - .await? - .data, - ) - .expect("Faucet counter has unexpected encoding") - } else { - // `0` if not previously set (same as - // `testnet_pow::get_counter`) - testnet_pow::Counter::default() - }; - - Ok(testnet_pow::ChallengeParams { - difficulty, - counter, - }) - } - } -} diff --git a/shared/src/ledger/queries/vp/pos.rs b/shared/src/ledger/queries/vp/pos.rs index aff5181a19b..e78bff146b6 100644 --- a/shared/src/ledger/queries/vp/pos.rs +++ b/shared/src/ledger/queries/vp/pos.rs @@ -531,7 +531,8 @@ where #[cfg(any(test, feature = "async-client"))] pub mod client_only_methods { use super::*; - use crate::ledger::queries::{Client, RPC}; + use crate::ledger::queries::RPC; + use crate::sdk::queries::Client; impl Pos { /// Get bonds and unbonds with all details (slashes and rewards, if any) diff --git a/shared/src/ledger/queries/vp/token.rs b/shared/src/ledger/queries/vp/token.rs index ffb1117c913..3b99cb0fdaa 100644 --- a/shared/src/ledger/queries/vp/token.rs +++ b/shared/src/ledger/queries/vp/token.rs @@ -30,7 +30,8 @@ pub mod client_only_methods { use borsh::BorshDeserialize; use super::Token; - use crate::ledger::queries::{Client, RPC}; + use 
crate::ledger::queries::RPC; + use crate::sdk::queries::Client; use crate::types::address::Address; use crate::types::token; diff --git a/shared/src/lib.rs b/shared/src/lib.rs index 4bedc091e5a..3036c4cb47e 100644 --- a/shared/src/lib.rs +++ b/shared/src/lib.rs @@ -24,6 +24,7 @@ pub use { }; pub mod ledger; pub use namada_core::proto; +pub mod sdk; pub mod types; pub mod vm; diff --git a/shared/src/ledger/args.rs b/shared/src/sdk/args.rs similarity index 98% rename from shared/src/ledger/args.rs rename to shared/src/sdk/args.rs index d3413c588b9..b765dece5a0 100644 --- a/shared/src/ledger/args.rs +++ b/shared/src/sdk/args.rs @@ -822,9 +822,18 @@ pub struct RelayBridgePoolProof { pub safe_mode: bool, } -/// Consensus validator set arguments. +/// Bridge validator set arguments. #[derive(Debug, Clone)] -pub struct ConsensusValidatorSet { +pub struct BridgeValidatorSet { + /// The query parameters. + pub query: Query, + /// The epoch to query. + pub epoch: Option, +} + +/// Governance validator set arguments. +#[derive(Debug, Clone)] +pub struct GovernanceValidatorSet { /// The query parameters. pub query: Query, /// The epoch to query. 
diff --git a/shared/src/types/error.rs b/shared/src/sdk/error.rs similarity index 99% rename from shared/src/types/error.rs rename to shared/src/sdk/error.rs index 20d278ddea7..eb5f2b2d166 100644 --- a/shared/src/types/error.rs +++ b/shared/src/sdk/error.rs @@ -9,7 +9,7 @@ use prost::EncodeError; use tendermint_rpc::Error as RpcError; use thiserror::Error; -use crate::types::error::Error::Pinned; +use crate::sdk::error::Error::Pinned; use crate::vm::WasmValidationError; /// The standard Result type that most code ought to return diff --git a/shared/src/ledger/masp.rs b/shared/src/sdk/masp.rs similarity index 98% rename from shared/src/ledger/masp.rs rename to shared/src/sdk/masp.rs index 0ddb7cb6a07..2058844f635 100644 --- a/shared/src/ledger/masp.rs +++ b/shared/src/sdk/masp.rs @@ -59,18 +59,17 @@ use ripemd::Digest as RipemdDigest; use sha2::Digest; use thiserror::Error; -use crate::ledger::args::InputAmount; -use crate::ledger::queries::Client; -use crate::ledger::rpc::{query_conversion, query_storage_value}; -use crate::ledger::tx::decode_component; -use crate::ledger::{args, rpc}; use crate::proto::Tx; +use crate::sdk::args::InputAmount; +use crate::sdk::error::{EncodingError, Error, PinnedBalanceError, QueryError}; +use crate::sdk::queries::Client; +use crate::sdk::rpc::{query_conversion, query_storage_value}; +use crate::sdk::tx::decode_component; +use crate::sdk::{args, rpc}; use crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; use crate::types::address::{masp, Address}; -use crate::types::error::{ - EncodingError, Error, PinnedBalanceError, QueryError, -}; +use crate::types::io::Io; use crate::types::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; use crate::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; use crate::types::token; @@ -78,6 +77,7 @@ use crate::types::token::{ Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, }; use crate::types::transaction::{EllipticCurve, PairingEngine, WrapperTx}; +use 
crate::{display_line, edisplay_line}; /// Env var to point to a dir with MASP parameters. When not specified, /// the default OS specific path is used. @@ -1025,7 +1025,7 @@ impl ShieldedContext { /// context and express that value in terms of the currently timestamped /// asset types. If the key is not in the context, then we do not know the /// balance and hence we return None. - pub async fn compute_exchanged_balance( + pub async fn compute_exchanged_balance( &mut self, client: &C, vk: &ViewingKey, @@ -1035,7 +1035,7 @@ impl ShieldedContext { if let Some(balance) = self.compute_shielded_balance(client, vk).await? { let exchanged_amount = self - .compute_exchanged_amount( + .compute_exchanged_amount::<_, IO>( client, balance, target_epoch, @@ -1058,7 +1058,7 @@ impl ShieldedContext { /// the trace amount that could not be converted is moved from input to /// output. #[allow(clippy::too_many_arguments)] - async fn apply_conversion( + async fn apply_conversion( &mut self, client: &C, conv: AllowedConversion, @@ -1079,7 +1079,8 @@ impl ShieldedContext { make_asset_type(Some(asset_type.0), &asset_type.1, asset_type.2)?; let threshold = -conv[&masp_asset]; if threshold == 0 { - eprintln!( + edisplay_line!( + IO, "Asset threshold of selected conversion for asset type {} is \ 0, this is a bug, please report it.", masp_asset @@ -1109,7 +1110,7 @@ impl ShieldedContext { /// note of the conversions that were used. Note that this function does /// not assume that allowed conversions from the ledger are expressed in /// terms of the latest asset types. - pub async fn compute_exchanged_amount( + pub async fn compute_exchanged_amount( &mut self, client: &C, mut input: MaspAmount, @@ -1147,14 +1148,15 @@ impl ShieldedContext { if let (Some((conv, _wit, usage)), false) = (conversions.get_mut(&asset_type), at_target_asset_type) { - println!( + display_line!( + IO, "converting current asset type to latest asset type..." 
); // Not at the target asset type, not at the latest asset // type. Apply conversion to get from // current asset type to the latest // asset type. - self.apply_conversion( + self.apply_conversion::<_, IO>( client, conv.clone(), (asset_epoch, token_addr.clone(), denom), @@ -1168,14 +1170,15 @@ impl ShieldedContext { conversions.get_mut(&target_asset_type), at_target_asset_type, ) { - println!( + display_line!( + IO, "converting latest asset type to target asset type..." ); // Not at the target asset type, yet at the latest asset // type. Apply inverse conversion to get // from latest asset type to the target // asset type. - self.apply_conversion( + self.apply_conversion::<_, IO>( client, conv.clone(), (asset_epoch, token_addr.clone(), denom), @@ -1210,7 +1213,7 @@ impl ShieldedContext { /// of the specified asset type. Return the total value accumulated plus /// notes and the corresponding diversifiers/merkle paths that were used to /// achieve the total value. - pub async fn collect_unspent_notes( + pub async fn collect_unspent_notes( &mut self, client: &C, vk: &ViewingKey, @@ -1256,7 +1259,7 @@ impl ShieldedContext { })?; let input = self.decode_all_amounts(client, pre_contr).await; let (contr, proposed_convs) = self - .compute_exchanged_amount( + .compute_exchanged_amount::<_, IO>( client, input, target_epoch, @@ -1286,7 +1289,7 @@ impl ShieldedContext { .path() .ok_or_else(|| { Error::Other(format!( - "Un able to get path: {}", + "Unable to get path: {}", line!() )) })?; @@ -1395,7 +1398,7 @@ impl ShieldedContext { /// the epoch of the transaction or even before, so exchange all these /// amounts to the epoch of the transaction in order to get the value that /// would have been displayed in the epoch of the transaction. 
- pub async fn compute_exchanged_pinned_balance( + pub async fn compute_exchanged_pinned_balance( &mut self, client: &C, owner: PaymentAddress, @@ -1404,16 +1407,21 @@ impl ShieldedContext { // Obtain the balance that will be exchanged let (amt, ep) = Self::compute_pinned_balance(client, owner, viewing_key).await?; - println!("Pinned balance: {:?}", amt); + display_line!(IO, "Pinned balance: {:?}", amt); // Establish connection with which to do exchange rate queries let amount = self.decode_all_amounts(client, amt).await; - println!("Decoded pinned balance: {:?}", amount); + display_line!(IO, "Decoded pinned balance: {:?}", amount); // Finally, exchange the balance to the transaction's epoch let computed_amount = self - .compute_exchanged_amount(client, amount, ep, BTreeMap::new()) + .compute_exchanged_amount::<_, IO>( + client, + amount, + ep, + BTreeMap::new(), + ) .await? .0; - println!("Exchanged amount: {:?}", computed_amount); + display_line!(IO, "Exchanged amount: {:?}", computed_amount); Ok((self.decode_all_amounts(client, computed_amount).await, ep)) } @@ -1475,7 +1483,7 @@ impl ShieldedContext { /// understood that transparent account changes are effected only by the /// amounts and signatures specified by the containing Transfer object. #[cfg(feature = "masp-tx-gen")] - pub async fn gen_shielded_transfer( + pub async fn gen_shielded_transfer( &mut self, client: &C, args: args::TxTransfer, @@ -1545,7 +1553,7 @@ impl ShieldedContext { if let Some(sk) = spending_key { // Locate unspent notes that can help us meet the transaction amount let (_, unspent_notes, used_convs) = self - .collect_unspent_notes( + .collect_unspent_notes::<_, IO>( client, &to_viewing_key(&sk).vk, I128Sum::from_sum(amount), diff --git a/shared/src/sdk/mod.rs b/shared/src/sdk/mod.rs new file mode 100644 index 00000000000..381bac03d1c --- /dev/null +++ b/shared/src/sdk/mod.rs @@ -0,0 +1,12 @@ +//! 
Namada's SDK API +pub mod rpc; + +pub mod args; +pub mod masp; +pub mod signing; +#[allow(clippy::result_large_err)] +pub mod tx; + +pub mod error; +pub mod queries; +pub mod wallet; diff --git a/shared/src/sdk/queries.rs b/shared/src/sdk/queries.rs new file mode 100644 index 00000000000..4877f21c9e1 --- /dev/null +++ b/shared/src/sdk/queries.rs @@ -0,0 +1,275 @@ +//! Query functionality related to the SDK +use std::fmt::{Debug, Display}; + +use namada_core::types::storage::BlockHeight; +use tendermint_rpc::endpoint::{ + abci_info, block, block_results, blockchain, commit, consensus_params, + consensus_state, health, net_info, status, +}; +use tendermint_rpc::query::Query; +use tendermint_rpc::{Error as RpcError, Order}; + +use crate::ledger::queries::{EncodedResponseQuery, Error}; +use crate::tendermint::block::Height; + +/// A client with async request dispatcher method, which can be used to invoke +/// type-safe methods from a root [`crate::ledger::queries::Router`], generated +/// via `router!` macro. +#[cfg(any(test, feature = "async-client"))] +#[async_trait::async_trait(?Send)] +pub trait Client { + /// `std::io::Error` can happen in decoding with + /// `BorshDeserialize::try_from_slice` + type Error: From + Display + Debug; + + /// Send a simple query request at the given path. For more options, use the + /// `request` method. + async fn simple_request( + &self, + path: String, + ) -> Result, Self::Error> { + self.request(path, None, None, false) + .await + .map(|response| response.data) + } + + /// Send a query request at the given path. + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result; + + /// `/abci_info`: get information about the ABCI application. + async fn abci_info(&self) -> Result { + Ok(self.perform(abci_info::Request).await?.response) + } + + /// `/broadcast_tx_sync`: broadcast a transaction, returning the response + /// from `CheckTx`. 
+ async fn broadcast_tx_sync( + &self, + tx: crate::tendermint::abci::Transaction, + ) -> Result + { + self.perform( + tendermint_rpc::endpoint::broadcast::tx_sync::Request::new(tx), + ) + .await + } + + /// `/block`: get the latest block. + async fn latest_block(&self) -> Result { + self.perform(block::Request::default()).await + } + + /// `/block`: get block at a given height. + async fn block(&self, height: H) -> Result + where + H: Into + Send, + { + self.perform(block::Request::new(height.into())).await + } + + /// `/block_search`: search for blocks by BeginBlock and EndBlock events. + async fn block_search( + &self, + query: Query, + page: u32, + per_page: u8, + order: Order, + ) -> Result + { + self.perform(tendermint_rpc::endpoint::block_search::Request::new( + query, page, per_page, order, + )) + .await + } + + /// `/block_results`: get ABCI results for a block at a particular height. + async fn block_results( + &self, + height: H, + ) -> Result + where + H: Into + Send, + { + self.perform(tendermint_rpc::endpoint::block_results::Request::new( + height.into(), + )) + .await + } + + /// `/tx_search`: search for transactions with their results. + async fn tx_search( + &self, + query: Query, + prove: bool, + page: u32, + per_page: u8, + order: Order, + ) -> Result { + self.perform(tendermint_rpc::endpoint::tx_search::Request::new( + query, prove, page, per_page, order, + )) + .await + } + + /// `/abci_query`: query the ABCI application + async fn abci_query( + &self, + path: Option, + data: V, + height: Option, + prove: bool, + ) -> Result + where + V: Into> + Send, + { + Ok(self + .perform(tendermint_rpc::endpoint::abci_query::Request::new( + path, data, height, prove, + )) + .await? + .response) + } + + /// `/block_results`: get ABCI results for the latest block. + async fn latest_block_results( + &self, + ) -> Result { + self.perform(block_results::Request::default()).await + } + + /// `/blockchain`: get block headers for `min` <= `height` <= `max`. 
+ /// + /// Block headers are returned in descending order (highest first). + /// + /// Returns at most 20 items. + async fn blockchain( + &self, + min: H, + max: H, + ) -> Result + where + H: Into + Send, + { + // TODO(tarcieri): return errors for invalid params before making + // request? + self.perform(blockchain::Request::new(min.into(), max.into())) + .await + } + + /// `/commit`: get block commit at a given height. + async fn commit(&self, height: H) -> Result + where + H: Into + Send, + { + self.perform(commit::Request::new(height.into())).await + } + + /// `/consensus_params`: get current consensus parameters at the specified + /// height. + async fn consensus_params( + &self, + height: H, + ) -> Result + where + H: Into + Send, + { + self.perform(consensus_params::Request::new(Some(height.into()))) + .await + } + + /// `/consensus_state`: get current consensus state + async fn consensus_state( + &self, + ) -> Result { + self.perform(consensus_state::Request::new()).await + } + + /// `/consensus_params`: get the latest consensus parameters. + async fn latest_consensus_params( + &self, + ) -> Result { + self.perform(consensus_params::Request::new(None)).await + } + + /// `/commit`: get the latest block commit + async fn latest_commit(&self) -> Result { + self.perform(commit::Request::default()).await + } + + /// `/health`: get node health. + /// + /// Returns empty result (200 OK) on success, no response in case of an + /// error. + async fn health(&self) -> Result<(), RpcError> { + self.perform(health::Request).await?; + Ok(()) + } + + /// `/net_info`: obtain information about P2P and other network connections. + async fn net_info(&self) -> Result { + self.perform(net_info::Request).await + } + + /// `/status`: get Tendermint status including node info, pubkey, latest + /// block hash, app hash, block height and time. 
+ async fn status(&self) -> Result { + self.perform(status::Request).await + } + + /// Perform a request against the RPC endpoint + async fn perform(&self, request: R) -> Result + where + R: tendermint_rpc::SimpleRequest; +} + +#[async_trait::async_trait(?Send)] +impl Client for C { + type Error = Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height + .map(|height| { + crate::tendermint::block::Height::try_from(height.0) + .map_err(|_err| Error::InvalidHeight(height)) + }) + .transpose()?; + let response = self + .abci_query( + // TODO open the private Path constructor in tendermint-rpc + Some(std::str::FromStr::from_str(&path).unwrap()), + data, + height, + prove, + ) + .await?; + use crate::tendermint::abci::Code; + match response.code { + Code::Ok => Ok(EncodedResponseQuery { + data: response.value, + info: response.info, + proof: response.proof, + }), + Code::Err(code) => Err(Error::Query(response.info, code)), + } + } + + async fn perform(&self, request: R) -> Result + where + R: tendermint_rpc::SimpleRequest, + { + tendermint_rpc::Client::perform(self, request).await + } +} diff --git a/shared/src/ledger/rpc.rs b/shared/src/sdk/rpc.rs similarity index 93% rename from shared/src/ledger/rpc.rs rename to shared/src/sdk/rpc.rs index ddef3ecb209..58609bed427 100644 --- a/shared/src/ledger/rpc.rs +++ b/shared/src/sdk/rpc.rs @@ -12,8 +12,6 @@ use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_core::ledger::governance::storage::proposal::StorageProposal; use namada_core::ledger::governance::utils::Vote; use namada_core::ledger::storage::LastBlock; -#[cfg(not(feature = "mainnet"))] -use namada_core::ledger::testnet_pow; use namada_core::types::account::Account; use namada_core::types::address::Address; use namada_core::types::storage::Key; @@ -26,28 +24,31 @@ use namada_proof_of_stake::types::{ }; use 
serde::Serialize; -use crate::ledger::args::InputAmount; use crate::ledger::events::Event; use crate::ledger::queries::vp::pos::EnrichedBondsAndUnbondsDetails; use crate::ledger::queries::RPC; use crate::proto::Tx; +use crate::sdk::args::InputAmount; +use crate::sdk::error; +use crate::sdk::error::{EncodingError, Error, QueryError}; use crate::tendermint::block::Height; use crate::tendermint::merkle::proof::Proof; use crate::tendermint_rpc::error::Error as TError; use crate::tendermint_rpc::query::Query; use crate::tendermint_rpc::Order; use crate::types::control_flow::{time, Halt, TryHalt}; -use crate::types::error::{EncodingError, Error, QueryError}; use crate::types::hash::Hash; +use crate::types::io::Io; use crate::types::key::common; use crate::types::storage::{BlockHeight, BlockResults, Epoch, PrefixValue}; -use crate::types::{error, storage, token}; +use crate::types::{storage, token}; +use crate::{display_line, edisplay_line}; /// Query the status of a given transaction. /// /// If a response is not delivered until `deadline`, we exit the cli with an /// error. -pub async fn query_tx_status( +pub async fn query_tx_status( client: &C, status: TxEventQuery<'_>, deadline: time::Instant, @@ -88,7 +89,10 @@ where }) .await .try_halt(|_| { - eprintln!("Transaction status query deadline of {deadline:?} exceeded"); + edisplay_line!( + IO, + "Transaction status query deadline of {deadline:?} exceeded" + ); }) } @@ -213,39 +217,6 @@ pub async fn known_address( } } -#[cfg(not(feature = "mainnet"))] -/// Check if the given address is a testnet faucet account address. -pub async fn is_faucet_account( - client: &C, - address: &Address, -) -> bool { - unwrap_client_response::(RPC.vp().is_faucet(client, address).await) -} - -#[cfg(not(feature = "mainnet"))] -/// Get faucet account address, if any is setup for the network. -pub async fn get_faucet_address( - client: &C, -) -> Option
{ - unwrap_client_response::>( - RPC.vp().get_faucet_address(client).await, - ) -} - -#[cfg(not(feature = "mainnet"))] -/// Obtain a PoW challenge for a withdrawal from a testnet faucet account, if -/// any is setup for the network. -pub async fn get_testnet_pow_challenge< - C: crate::ledger::queries::Client + Sync, ->( - client: &C, - source: Address, -) -> testnet_pow::Challenge { - unwrap_client_response::( - RPC.vp().testnet_pow_challenge(client, source).await, - ) -} - // Consider how we want to handle this unwrap. It gets used in contexts that // often ignore the optional value and do not have any error type surrounding // it. @@ -266,7 +237,10 @@ pub async fn query_conversion( } /// Query a wasm code hash -pub async fn query_wasm_code_hash( +pub async fn query_wasm_code_hash< + C: crate::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, code_path: impl AsRef, ) -> Result { @@ -277,7 +251,8 @@ pub async fn query_wasm_code_hash( { Some(hash) => Ok(Hash::try_from(&hash[..]).expect("Invalid code hash")), None => { - eprintln!( + edisplay_line!( + IO, "The corresponding wasm code of the code path {} doesn't \ exist on chain.", code_path.as_ref(), @@ -351,7 +326,11 @@ pub async fn query_storage_value_bytes< /// Query a range of storage values with a matching prefix and decode them with /// [`BorshDeserialize`]. Returns an iterator of the storage keys paired with /// their associated values. -pub async fn query_storage_prefix( +pub async fn query_storage_prefix< + C: crate::ledger::queries::Client + Sync, + IO: Io, + T, +>( client: &C, key: &storage::Key, ) -> Result>, error::Error> @@ -368,9 +347,11 @@ where &value[..], ) { Err(err) => { - eprintln!( + edisplay_line!( + IO, "Skipping a value for key {}. 
Error in decoding: {}", - key, err + key, + err ); None } @@ -456,7 +437,7 @@ pub async fn query_tx_events( } /// Dry run a transaction -pub async fn dry_run_tx( +pub async fn dry_run_tx( client: &C, tx_bytes: Vec, ) -> Result { @@ -465,7 +446,7 @@ pub async fn dry_run_tx( RPC.shell().dry_run_tx(client, data, height, prove).await, )? .data; - println! {"Dry-run result: {}", result}; + display_line!(IO, "Dry-run result: {}", result); Ok(result) } @@ -737,8 +718,6 @@ pub async fn query_proposal_by_id( client: &C, proposal_id: u64, ) -> Result, Error> { - // let a = RPC.vp().gov().proposal_id(client, &proposal_id).await; - // println!("{:?}", a.err().unwrap()); convert_response::( RPC.vp().gov().proposal_id(client, &proposal_id).await, ) @@ -810,6 +789,7 @@ pub async fn get_public_key_at( /// Query a validator's unbonds for a given epoch pub async fn query_and_print_unbonds< C: crate::ledger::queries::Client + Sync, + IO: Io, >( client: &C, source: &Address, @@ -830,16 +810,18 @@ pub async fn query_and_print_unbonds< } } if total_withdrawable != token::Amount::default() { - println!( + display_line!( + IO, "Total withdrawable now: {}.", total_withdrawable.to_string_native() ); } if !not_yet_withdrawable.is_empty() { - println!("Current epoch: {current_epoch}.") + display_line!(IO, "Current epoch: {current_epoch}.") } for (withdraw_epoch, amount) in not_yet_withdrawable { - println!( + display_line!( + IO, "Amount {} withdrawable starting from epoch {withdraw_epoch}.", amount.to_string_native() ); @@ -929,6 +911,7 @@ pub async fn bonds_and_unbonds( .await, ) } + /// Get bonds and unbonds with all details (slashes and rewards, if any) /// grouped by their bond IDs, enriched with extra information calculated from /// the data. @@ -954,7 +937,10 @@ pub async fn enriched_bonds_and_unbonds< } /// Get the correct representation of the amount given the token type. 
-pub async fn validate_amount( +pub async fn validate_amount< + C: crate::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, amount: InputAmount, token: &Address, @@ -971,13 +957,15 @@ pub async fn validate_amount( Some(denom) => Ok(denom), None => { if force { - println!( + display_line!( + IO, "No denomination found for token: {token}, but --force \ was passed. Defaulting to the provided denomination." ); Ok(input_amount.denom) } else { - println!( + display_line!( + IO, "No denomination found for token: {token}, the input \ arguments could not be parsed." ); @@ -988,7 +976,8 @@ pub async fn validate_amount( } }?; if denom < input_amount.denom && !force { - println!( + display_line!( + IO, "The input amount contained a higher precision than allowed by \ {token}." ); @@ -998,7 +987,8 @@ pub async fn validate_amount( )))) } else { input_amount.increase_precision(denom).map_err(|_err| { - println!( + display_line!( + IO, "The amount provided requires more the 256 bits to represent." ); Error::from(QueryError::General( @@ -1011,7 +1001,7 @@ pub async fn validate_amount( } /// Wait for a first block and node to be synced. 
-pub async fn wait_until_node_is_synched(client: &C) -> Halt<()> +pub async fn wait_until_node_is_synched(client: &C) -> Halt<()> where C: crate::ledger::queries::Client + Sync, { @@ -1035,7 +1025,8 @@ where if is_at_least_height_one && !is_catching_up { return ControlFlow::Break(Ok(())); } - println!( + display_line!( + IO, " Waiting for {} ({}/{} tries)...", if is_at_least_height_one { "a first block" @@ -1049,7 +1040,11 @@ where ControlFlow::Continue(()) } Err(e) => { - eprintln!("Failed to query node status with error: {}", e); + edisplay_line!( + IO, + "Failed to query node status with error: {}", + e + ); ControlFlow::Break(Err(())) } } @@ -1057,7 +1052,10 @@ where .await // maybe time out .try_halt(|_| { - println!("Node is still catching up, wait for it to finish synching."); + display_line!( + IO, + "Node is still catching up, wait for it to finish synching." + ); })? // error querying rpc .try_halt(|_| ()) @@ -1067,6 +1065,7 @@ where /// correctly as a string. pub async fn format_denominated_amount< C: crate::ledger::queries::Client + Sync, + IO: Io, >( client: &C, token: &Address, @@ -1076,11 +1075,12 @@ pub async fn format_denominated_amount< RPC.vp().token().denomination(client, token).await, ) .unwrap_or_else(|t| { - println!("Error in querying for denomination: {t}"); + display_line!(IO, "Error in querying for denomination: {t}"); None }) .unwrap_or_else(|| { - println!( + display_line!( + IO, "No denomination found for token: {token}, defaulting to zero \ decimal places" ); diff --git a/shared/src/ledger/signing.rs b/shared/src/sdk/signing.rs similarity index 84% rename from shared/src/ledger/signing.rs rename to shared/src/sdk/signing.rs index 1753c645d35..145daa8213e 100644 --- a/shared/src/ledger/signing.rs +++ b/shared/src/sdk/signing.rs @@ -1,5 +1,4 @@ //! 
Functions to sign transactions - use std::collections::{BTreeMap, HashMap}; use std::path::PathBuf; @@ -24,24 +23,27 @@ use serde::{Deserialize, Serialize}; use sha2::Digest; use zeroize::Zeroizing; -use super::masp::{ShieldedContext, ShieldedTransfer, ShieldedUtils}; -use super::rpc::validate_amount; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use crate::ibc_proto::google::protobuf::Any; -use crate::ledger::masp::make_asset_type; use crate::ledger::parameters::storage as parameter_storage; -use crate::ledger::rpc::{format_denominated_amount, query_wasm_code_hash}; -use crate::ledger::tx::{ +use crate::proto::{MaspBuilder, Section, Tx}; +use crate::sdk::error::{EncodingError, Error, TxError}; +use crate::sdk::masp::{ + make_asset_type, ShieldedContext, ShieldedTransfer, ShieldedUtils, +}; +use crate::sdk::rpc::{ + format_denominated_amount, query_wasm_code_hash, validate_amount, +}; +use crate::sdk::tx::{ TX_BOND_WASM, TX_CHANGE_COMMISSION_WASM, TX_IBC_WASM, TX_INIT_ACCOUNT_WASM, TX_INIT_PROPOSAL, TX_INIT_VALIDATOR_WASM, TX_REVEAL_PK, TX_TRANSFER_WASM, TX_UNBOND_WASM, TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, VP_USER_WASM, }; -pub use crate::ledger::wallet::store::AddressVpType; -use crate::ledger::wallet::{Wallet, WalletUtils}; -use crate::ledger::{args, rpc}; -use crate::proto::{MaspBuilder, Section, Tx}; -use crate::types::error::{EncodingError, Error, TxError}; +pub use crate::sdk::wallet::store::AddressVpType; +use crate::sdk::wallet::{Wallet, WalletUtils}; +use crate::sdk::{args, rpc}; +use crate::types::io::*; use crate::types::key::*; use crate::types::masp::{ExtendedViewingKey, PaymentAddress}; use crate::types::storage::Epoch; @@ -51,7 +53,8 @@ use crate::types::transaction::governance::{ InitProposalData, VoteProposalData, }; use crate::types::transaction::pos::InitValidator; -use crate::types::transaction::{Fee, TxType}; +use crate::types::transaction::Fee; +use crate::{display_line, edisplay_line}; 
#[cfg(feature = "std")] /// Env. var specifying where to store signing test vectors @@ -63,6 +66,8 @@ const ENV_VAR_TX_LOG_PATH: &str = "NAMADA_TX_LOG_PATH"; /// A struture holding the signing data to craft a transaction #[derive(Clone)] pub struct SigningTxData { + /// The address owning the transaction + pub owner: Option
, /// The public keys associated to an account pub public_keys: Vec, /// The threshold associated to an account @@ -80,6 +85,7 @@ pub struct SigningTxData { pub async fn find_pk< C: crate::ledger::queries::Client + Sync, U: WalletUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -88,7 +94,8 @@ pub async fn find_pk< ) -> Result { match addr { Address::Established(_) => { - println!( + display_line!( + IO, "Looking-up public key of {} from the ledger...", addr.encode() ); @@ -147,8 +154,9 @@ pub fn find_key_by_pk( /// possible. If no explicit signer given, use the `default`. If no `default` /// is given, an `Error` is returned. pub async fn tx_signers< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -171,7 +179,7 @@ pub async fn tx_signers< Some(signer) if signer == masp() => Ok(vec![masp_tx_key().ref_to()]), Some(signer) => Ok(vec![ - find_pk::(client, wallet, &signer, args.password.clone()) + find_pk::(client, wallet, &signer, args.password.clone()) .await?, ]), None => other_err( @@ -219,7 +227,11 @@ pub fn sign_tx( } }) .collect::>(); - tx.sign_raw(signing_tx_keypairs, account_public_keys_map); + tx.sign_raw( + signing_tx_keypairs, + account_public_keys_map, + signing_data.owner, + ); } let fee_payer_keypair = @@ -231,22 +243,24 @@ pub fn sign_tx( /// Return the necessary data regarding an account to be able to generate a /// multisignature section pub async fn aux_signing_data< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, args: &args::Tx, - owner: &Option
, + owner: Option
, default_signer: Option
, ) -> Result { let public_keys = if owner.is_some() || args.wrapper_fee_payer.is_none() { - tx_signers::(client, wallet, args, default_signer.clone()).await? + tx_signers::(client, wallet, args, default_signer.clone()) + .await? } else { vec![] }; - let (account_public_keys_map, threshold) = match owner { + let (account_public_keys_map, threshold) = match &owner { Some(owner @ Address::Established(_)) => { let account = rpc::get_account_info::(client, owner).await?; if let Some(account) = account { @@ -284,6 +298,7 @@ pub async fn aux_signing_data< } Ok(SigningTxData { + owner, public_keys, threshold, account_public_keys_map, @@ -291,132 +306,6 @@ pub async fn aux_signing_data< }) } -#[cfg(not(feature = "mainnet"))] -/// Solve the PoW challenge if balance is insufficient to pay transaction fees -/// or if solution is explicitly requested. -pub async fn solve_pow_challenge( - client: &C, - args: &args::Tx, - requires_pow: bool, - total_fee: Amount, - balance: Amount, - source: Address, -) -> Option { - let is_bal_sufficient = total_fee <= balance; - if !is_bal_sufficient { - let token_addr = args.fee_token.clone(); - let err_msg = format!( - "The wrapper transaction source doesn't have enough balance to \ - pay fee {}, got {}.", - format_denominated_amount(client, &token_addr, total_fee).await, - format_denominated_amount(client, &token_addr, balance).await, - ); - if !args.force && cfg!(feature = "mainnet") { - panic!("{}", err_msg); - } - } - // A PoW solution can be used to allow zero-fee testnet transactions - // If the address derived from the keypair doesn't have enough balance - // to pay for the fee, allow to find a PoW solution instead. 
- if (requires_pow || !is_bal_sufficient) && !args.dump_tx { - println!("The transaction requires the completion of a PoW challenge."); - // Obtain a PoW challenge for faucet withdrawal - let challenge = rpc::get_testnet_pow_challenge(client, source).await; - - // Solve the solution, this blocks until a solution is found - let solution = challenge.solve(); - Some(solution) - } else { - None - } -} - -#[cfg(not(feature = "mainnet"))] -/// Update the PoW challenge inside the given transaction -pub async fn update_pow_challenge( - client: &C, - args: &args::Tx, - tx: &mut Tx, - requires_pow: bool, - source: Address, -) { - let gas_cost_key = parameter_storage::get_gas_cost_key(); - let minimum_fee = match rpc::query_storage_value::< - C, - BTreeMap, - >(client, &gas_cost_key) - .await - .and_then(|map| { - map.get(&args.fee_token) - .map(ToOwned::to_owned) - .ok_or_else(|| Error::Other("no fee found".to_string())) - }) { - Ok(amount) => amount, - Err(_e) => { - eprintln!( - "Could not retrieve the gas cost for token {}", - args.fee_token - ); - if !args.force { - panic!(); - } else { - token::Amount::default() - } - } - }; - let fee_amount = match args.fee_amount { - Some(amount) => { - let validated_fee_amount = - validate_amount(client, amount, &args.fee_token, args.force) - .await - .expect("Expected to be able to validate fee"); - - let amount = - Amount::from_uint(validated_fee_amount.amount, 0).unwrap(); - - if amount >= minimum_fee { - amount - } else if !args.force { - // Update the fee amount if it's not enough - println!( - "The provided gas price {} is less than the minimum \ - amount required {}, changing it to match the minimum", - amount.to_string_native(), - minimum_fee.to_string_native() - ); - minimum_fee - } else { - amount - } - } - None => minimum_fee, - }; - let total_fee = fee_amount * u64::from(args.gas_limit); - - let balance_key = token::balance_key(&args.fee_token, &source); - let balance = - rpc::query_storage_value::(client, &balance_key) - 
.await - .unwrap_or_default(); - - if let TxType::Wrapper(wrapper) = &mut tx.header.tx_type { - let pow_solution = solve_pow_challenge( - client, - args, - requires_pow, - total_fee, - balance, - source, - ) - .await; - wrapper.fee = Fee { - amount_per_gas_unit: fee_amount, - token: args.fee_token.clone(), - }; - wrapper.pow_solution = pow_solution; - } -} - /// Informations about the post-tx balance of the tx's source. Used to correctly /// handle fee validation in the wrapper tx pub struct TxSourcePostBalance { @@ -433,8 +322,9 @@ pub struct TxSourcePostBalance { /// progress on chain. #[allow(clippy::too_many_arguments)] pub async fn wrap_tx< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, V: ShieldedUtils, + IO: Io, >( client: &C, shielded: &mut ShieldedContext, @@ -443,7 +333,6 @@ pub async fn wrap_tx< tx_source_balance: Option, epoch: Epoch, fee_payer: common::PublicKey, - #[cfg(not(feature = "mainnet"))] requires_pow: bool, ) -> Option { let fee_payer_address = Address::from(&fee_payer); // Validate fee amount and token @@ -460,7 +349,8 @@ pub async fn wrap_tx< }) { Ok(amount) => amount, Err(_e) => { - eprintln!( + edisplay_line!( + IO, "Could not retrieve the gas cost for token {}", args.fee_token ); @@ -473,10 +363,14 @@ pub async fn wrap_tx< }; let fee_amount = match args.fee_amount { Some(amount) => { - let validated_fee_amount = - validate_amount(client, amount, &args.fee_token, args.force) - .await - .expect("Expected to be able to validate fee"); + let validated_fee_amount = validate_amount::<_, IO>( + client, + amount, + &args.fee_token, + args.force, + ) + .await + .expect("Expected to be able to validate fee"); let amount = Amount::from_uint(validated_fee_amount.amount, 0).unwrap(); @@ -485,7 +379,8 @@ pub async fn wrap_tx< amount } else if !args.force { // Update the fee amount if it's not enough - println!( + display_line!( + IO, "The provided gas price {} is less than the minimum \ amount required {}, 
changing it to match the minimum", amount.to_string_native(), @@ -546,7 +441,7 @@ pub async fn wrap_tx< }; match shielded - .gen_shielded_transfer(client, transfer_args) + .gen_shielded_transfer::<_, IO>(client, transfer_args) .await { Ok(Some(ShieldedTransfer { @@ -585,7 +480,6 @@ pub async fn wrap_tx< if u64::try_from(descriptions).unwrap() > descriptions_limit && !args.force - && cfg!(feature = "mainnet") { panic!( "Fee unshielding descriptions exceed the limit" @@ -596,16 +490,20 @@ pub async fn wrap_tx< (Some(transaction), Some(unshielding_epoch)) } Ok(None) => { - eprintln!("Missing unshielding transaction"); - if !args.force && cfg!(feature = "mainnet") { + edisplay_line!(IO, "Missing unshielding transaction"); + if !args.force { panic!(); } (None, None) } Err(e) => { - eprintln!("Error in fee unshielding generation: {}", e); - if !args.force && cfg!(feature = "mainnet") { + edisplay_line!( + IO, + "Error in fee unshielding generation: {}", + e + ); + if !args.force { panic!(); } @@ -617,17 +515,21 @@ pub async fn wrap_tx< let err_msg = format!( "The wrapper transaction source doesn't have enough \ balance to pay fee {}, balance: {}.", - format_denominated_amount(client, &token_addr, total_fee) - .await, - format_denominated_amount( + format_denominated_amount::<_, IO>( + client, + &token_addr, + total_fee + ) + .await, + format_denominated_amount::<_, IO>( client, &token_addr, updated_balance ) .await, ); - eprintln!("{}", err_msg); - if !args.force && cfg!(feature = "mainnet") { + edisplay_line!(IO, "{}", err_msg); + if !args.force { panic!("{}", err_msg); } @@ -636,7 +538,8 @@ pub async fn wrap_tx< } _ => { if args.fee_unshield.is_some() { - println!( + display_line!( + IO, "Enough transparent balance to pay fees: the fee \ unshielding spending key will be ignored" ); @@ -653,17 +556,6 @@ pub async fn wrap_tx< namada_core::types::hash::Hash(hasher.finalize().into()) }); - #[cfg(not(feature = "mainnet"))] - let pow_solution = solve_pow_challenge( - client, 
- args, - requires_pow, - total_fee, - updated_balance, - fee_payer_address, - ) - .await; - tx.add_wrapper( Fee { amount_per_gas_unit: fee_amount, @@ -673,8 +565,6 @@ pub async fn wrap_tx< epoch, // TODO: partially validate the gas limit in client args.gas_limit, - #[cfg(not(feature = "mainnet"))] - pow_solution, unshield_section_hash, ); @@ -717,7 +607,10 @@ fn make_ledger_amount_addr( /// Adds a Ledger output line describing a given transaction amount and asset /// type -async fn make_ledger_amount_asset( +async fn make_ledger_amount_asset< + C: crate::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, tokens: &HashMap, output: &mut Vec, @@ -729,7 +622,8 @@ async fn make_ledger_amount_asset( if let Some((token, _, _epoch)) = assets.get(token) { // If the AssetType can be decoded, then at least display Addressees let formatted_amt = - format_denominated_amount(client, token, amount.into()).await; + format_denominated_amount::<_, IO>(client, token, amount.into()) + .await; if let Some(token) = tokens.get(token) { output .push( @@ -815,6 +709,7 @@ fn format_outputs(output: &mut Vec) { /// transactions pub async fn make_ledger_masp_endpoints< C: crate::ledger::queries::Client + Sync, + IO: Io, >( client: &C, tokens: &HashMap, @@ -838,7 +733,7 @@ pub async fn make_ledger_masp_endpoints< for sapling_input in builder.builder.sapling_inputs() { let vk = ExtendedViewingKey::from(*sapling_input.key()); output.push(format!("Sender : {}", vk)); - make_ledger_amount_asset( + make_ledger_amount_asset::<_, IO>( client, tokens, output, @@ -865,7 +760,7 @@ pub async fn make_ledger_masp_endpoints< for sapling_output in builder.builder.sapling_outputs() { let pa = PaymentAddress::from(sapling_output.address()); output.push(format!("Destination : {}", pa)); - make_ledger_amount_asset( + make_ledger_amount_asset::<_, IO>( client, tokens, output, @@ -891,8 +786,9 @@ pub async fn make_ledger_masp_endpoints< /// Internal method used to generate transaction test vectors 
#[cfg(feature = "std")] pub async fn generate_test_vector< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -907,7 +803,8 @@ pub async fn generate_test_vector< // Contract the large data blobs in the transaction tx.wallet_filter(); // Convert the transaction to Ledger format - let decoding = to_ledger_vector(client, wallet, &tx).await?; + let decoding = + to_ledger_vector::<_, _, IO>(client, wallet, &tx).await?; let output = serde_json::to_string(&decoding) .map_err(|e| Error::from(EncodingError::Serde(e.to_string())))?; // Record the transaction at the identified path @@ -946,32 +843,38 @@ pub async fn generate_test_vector< /// Converts the given transaction to the form that is displayed on the Ledger /// device pub async fn to_ledger_vector< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, tx: &Tx, ) -> Result { let init_account_hash = - query_wasm_code_hash(client, TX_INIT_ACCOUNT_WASM).await?; + query_wasm_code_hash::<_, IO>(client, TX_INIT_ACCOUNT_WASM).await?; let init_validator_hash = - query_wasm_code_hash(client, TX_INIT_VALIDATOR_WASM).await?; + query_wasm_code_hash::<_, IO>(client, TX_INIT_VALIDATOR_WASM).await?; let init_proposal_hash = - query_wasm_code_hash(client, TX_INIT_PROPOSAL).await?; + query_wasm_code_hash::<_, IO>(client, TX_INIT_PROPOSAL).await?; let vote_proposal_hash = - query_wasm_code_hash(client, TX_VOTE_PROPOSAL).await?; - let reveal_pk_hash = query_wasm_code_hash(client, TX_REVEAL_PK).await?; + query_wasm_code_hash::<_, IO>(client, TX_VOTE_PROPOSAL).await?; + let reveal_pk_hash = + query_wasm_code_hash::<_, IO>(client, TX_REVEAL_PK).await?; let update_account_hash = - query_wasm_code_hash(client, TX_UPDATE_ACCOUNT_WASM).await?; - let transfer_hash = query_wasm_code_hash(client, TX_TRANSFER_WASM).await?; - let ibc_hash = 
query_wasm_code_hash(client, TX_IBC_WASM).await?; - let bond_hash = query_wasm_code_hash(client, TX_BOND_WASM).await?; - let unbond_hash = query_wasm_code_hash(client, TX_UNBOND_WASM).await?; - let withdraw_hash = query_wasm_code_hash(client, TX_WITHDRAW_WASM).await?; + query_wasm_code_hash::<_, IO>(client, TX_UPDATE_ACCOUNT_WASM).await?; + let transfer_hash = + query_wasm_code_hash::<_, IO>(client, TX_TRANSFER_WASM).await?; + let ibc_hash = query_wasm_code_hash::<_, IO>(client, TX_IBC_WASM).await?; + let bond_hash = query_wasm_code_hash::<_, IO>(client, TX_BOND_WASM).await?; + let unbond_hash = + query_wasm_code_hash::<_, IO>(client, TX_UNBOND_WASM).await?; + let withdraw_hash = + query_wasm_code_hash::<_, IO>(client, TX_WITHDRAW_WASM).await?; let change_commission_hash = - query_wasm_code_hash(client, TX_CHANGE_COMMISSION_WASM).await?; - let user_hash = query_wasm_code_hash(client, VP_USER_WASM).await?; + query_wasm_code_hash::<_, IO>(client, TX_CHANGE_COMMISSION_WASM) + .await?; + let user_hash = query_wasm_code_hash::<_, IO>(client, VP_USER_WASM).await?; // To facilitate lookups of human-readable token names let tokens: HashMap = wallet @@ -1264,7 +1167,7 @@ pub async fn to_ledger_vector< tv.name = "Transfer 0".to_string(); tv.output.push("Type : Transfer".to_string()); - make_ledger_masp_endpoints( + make_ledger_masp_endpoints::<_, IO>( client, &tokens, &mut tv.output, @@ -1273,7 +1176,7 @@ pub async fn to_ledger_vector< &asset_types, ) .await; - make_ledger_masp_endpoints( + make_ledger_masp_endpoints::<_, IO>( client, &tokens, &mut tv.output_expert, @@ -1446,13 +1349,13 @@ pub async fn to_ledger_vector< if let Some(wrapper) = tx.header.wrapper() { let gas_token = wrapper.fee.token.clone(); - let gas_limit = format_denominated_amount( + let gas_limit = format_denominated_amount::<_, IO>( client, &gas_token, Amount::from(wrapper.gas_limit), ) .await; - let fee_amount_per_gas_unit = format_denominated_amount( + let fee_amount_per_gas_unit = 
format_denominated_amount::<_, IO>( client, &gas_token, wrapper.fee.amount_per_gas_unit, diff --git a/shared/src/ledger/tx.rs b/shared/src/sdk/tx.rs similarity index 85% rename from shared/src/ledger/tx.rs rename to shared/src/sdk/tx.rs index 5ebc7f52bba..06dc144ad64 100644 --- a/shared/src/ledger/tx.rs +++ b/shared/src/sdk/tx.rs @@ -34,8 +34,6 @@ use namada_core::types::transaction::pgf::UpdateStewardCommission; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{CommissionPair, ValidatorState}; -use super::rpc::query_wasm_code_hash; -use super::signing::{self, TxSourcePostBalance}; use crate::ibc::applications::transfer::msgs::transfer::MsgTransfer; use crate::ibc::applications::transfer::packet::PacketData; use crate::ibc::applications::transfer::PrefixedCoin; @@ -43,20 +41,22 @@ use crate::ibc::core::ics04_channel::timeout::TimeoutHeight; use crate::ibc::core::timestamp::Timestamp as IbcTimestamp; use crate::ibc::core::Msg; use crate::ibc::Height as IbcHeight; -use crate::ledger::args::{self, InputAmount}; use crate::ledger::ibc::storage::ibc_denom_key; -use crate::ledger::masp::TransferErr::Build; -use crate::ledger::masp::{ShieldedContext, ShieldedTransfer, ShieldedUtils}; -use crate::ledger::rpc::{ - self, format_denominated_amount, validate_amount, TxBroadcastData, - TxResponse, -}; -use crate::ledger::wallet::{Wallet, WalletUtils}; use crate::proto::{MaspBuilder, Tx}; +use crate::sdk::args::{self, InputAmount}; +use crate::sdk::error::{EncodingError, Error, QueryError, Result, TxError}; +use crate::sdk::masp::TransferErr::Build; +use crate::sdk::masp::{ShieldedContext, ShieldedTransfer, ShieldedUtils}; +use crate::sdk::rpc::{ + self, format_denominated_amount, query_wasm_code_hash, validate_amount, + TxBroadcastData, TxResponse, +}; +use crate::sdk::signing::{self, TxSourcePostBalance}; +use crate::sdk::wallet::{Wallet, WalletUtils}; use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use 
crate::tendermint_rpc::error::Error as RpcError; use crate::types::control_flow::{time, ProceedOrElse}; -use crate::types::error::{EncodingError, Error, QueryError, Result, TxError}; +use crate::types::io::Io; use crate::types::key::*; use crate::types::masp::TransferTarget; use crate::types::storage::Epoch; @@ -64,7 +64,7 @@ use crate::types::time::DateTimeUtc; use crate::types::transaction::account::{InitAccount, UpdateAccount}; use crate::types::transaction::{pos, TxType}; use crate::types::{storage, token}; -use crate::vm; +use crate::{display_line, edisplay_line, vm}; /// Initialize account transaction WASM pub const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; @@ -121,7 +121,7 @@ impl ProcessTxResponse { } /// Build and dump a transaction either to file or to screen -pub fn dump_tx(args: &args::Tx, tx: Tx) { +pub fn dump_tx(args: &args::Tx, tx: Tx) { let tx_id = tx.header_hash(); let serialized_tx = tx.serialize(); match args.output_folder.to_owned() { @@ -131,14 +131,15 @@ pub fn dump_tx(args: &args::Tx, tx: Tx) { let out = File::create(&tx_path).unwrap(); serde_json::to_writer_pretty(out, &serialized_tx) .expect("Should be able to write to file."); - println!( + display_line!( + IO, "Transaction serialized to {}.", tx_path.to_string_lossy() ); } None => { - println!("Below the serialized transaction: \n"); - println!("{}", serialized_tx) + display_line!(IO, "Below the serialized transaction: \n"); + display_line!(IO, "{}", serialized_tx) } } } @@ -147,9 +148,10 @@ pub fn dump_tx(args: &args::Tx, tx: Tx) { /// to it. 
#[allow(clippy::too_many_arguments)] pub async fn prepare_tx< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, _wallet: &mut Wallet, @@ -158,12 +160,11 @@ pub async fn prepare_tx< tx: &mut Tx, fee_payer: common::PublicKey, tx_source_balance: Option, - #[cfg(not(feature = "mainnet"))] requires_pow: bool, ) -> Result> { if !args.dry_run { let epoch = rpc::query_epoch(client).await?; - Ok(signing::wrap_tx( + Ok(signing::wrap_tx::<_, _, IO>( client, shielded, tx, @@ -171,8 +172,6 @@ pub async fn prepare_tx< tx_source_balance, epoch, fee_payer, - #[cfg(not(feature = "mainnet"))] - requires_pow, ) .await) } else { @@ -183,8 +182,9 @@ pub async fn prepare_tx< /// Submit transaction and wait for result. Returns a list of addresses /// initialized in the transaction if any. In dry run, this is always empty. pub async fn process_tx< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -202,7 +202,7 @@ pub async fn process_tx< // println!("HTTP request body: {}", request_body); if args.dry_run || args.dry_run_wrapper { - expect_dry_broadcast(TxBroadcastData::DryRun(tx), client).await + expect_dry_broadcast::<_, IO>(TxBroadcastData::DryRun(tx), client).await } else { // We use this to determine when the wrapper tx makes it on-chain let wrapper_hash = tx.header_hash().to_string(); @@ -222,13 +222,13 @@ pub async fn process_tx< // of masp epoch Either broadcast or submit transaction and // collect result into sum type if args.broadcast_only { - broadcast_tx(client, &to_broadcast) + broadcast_tx::<_, IO>(client, &to_broadcast) .await .map(ProcessTxResponse::Broadcast) } else { - match submit_tx(client, to_broadcast).await { + match submit_tx::<_, IO>(client, to_broadcast).await { Ok(x) => { - save_initialized_accounts::( + save_initialized_accounts::( wallet, args, 
x.initialized_accounts.clone(), @@ -243,20 +243,20 @@ pub async fn process_tx< } /// Check if a reveal public key transaction is needed -pub async fn is_reveal_pk_needed( +pub async fn is_reveal_pk_needed( client: &C, address: &Address, force: bool, ) -> Result where - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, { // Check if PK revealed Ok(force || !has_revealed_pk(client, address).await?) } /// Check if the public key for the given address has been revealed -pub async fn has_revealed_pk( +pub async fn has_revealed_pk( client: &C, address: &Address, ) -> Result { @@ -265,9 +265,10 @@ pub async fn has_revealed_pk( /// Submit transaction to reveal the given public key pub async fn build_reveal_pk< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -277,11 +278,12 @@ pub async fn build_reveal_pk< public_key: &common::PublicKey, fee_payer: &common::PublicKey, ) -> Result<(Tx, Option)> { - println!( + display_line!( + IO, "Submitting a tx to reveal the public key for address {address}..." 
); - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -296,10 +298,10 @@ pub async fn build_reveal_pk< } /// Broadcast a transaction to be included in the blockchain and checks that -/// the tx has been successfully included into the mempool of a validator +/// the tx has been successfully included into the mempool of a node /// /// In the case of errors in any of those stages, an error message is returned -pub async fn broadcast_tx( +pub async fn broadcast_tx( rpc_cli: &C, to_broadcast: &TxBroadcastData, ) -> Result { @@ -324,12 +326,20 @@ pub async fn broadcast_tx( lift_rpc_error(rpc_cli.broadcast_tx_sync(tx.to_bytes().into()).await)?; if response.code == 0.into() { - println!("Transaction added to mempool: {:?}", response); + display_line!(IO, "Transaction added to mempool: {:?}", response); // Print the transaction identifiers to enable the extraction of // acceptance/application results later { - println!("Wrapper transaction hash: {:?}", wrapper_tx_hash); - println!("Inner transaction hash: {:?}", decrypted_tx_hash); + display_line!( + IO, + "Wrapper transaction hash: {:?}", + wrapper_tx_hash + ); + display_line!( + IO, + "Inner transaction hash: {:?}", + decrypted_tx_hash + ); } Ok(response) } else { @@ -349,12 +359,12 @@ pub async fn broadcast_tx( /// 3. The decrypted payload of the tx has been included on the blockchain. 
/// /// In the case of errors in any of those stages, an error message is returned -pub async fn submit_tx( +pub async fn submit_tx( client: &C, to_broadcast: TxBroadcastData, ) -> Result where - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, { let (_, wrapper_hash, decrypted_hash) = match &to_broadcast { TxBroadcastData::Live { @@ -366,7 +376,7 @@ where }?; // Broadcast the supplied transaction - broadcast_tx(client, &to_broadcast).await?; + broadcast_tx::<_, IO>(client, &to_broadcast).await?; let deadline = time::Instant::now() + time::Duration::from_secs( @@ -380,19 +390,22 @@ where ); let parsed = { - let wrapper_query = - crate::ledger::rpc::TxEventQuery::Accepted(wrapper_hash.as_str()); - let event = rpc::query_tx_status(client, wrapper_query, deadline) - .await - .proceed_or(TxError::AcceptTimeout)?; + let wrapper_query = rpc::TxEventQuery::Accepted(wrapper_hash.as_str()); + let event = + rpc::query_tx_status::<_, IO>(client, wrapper_query, deadline) + .await + .proceed_or(TxError::AcceptTimeout)?; let parsed = TxResponse::from_event(event); - let tx_to_str = |parsed| { serde_json::to_string_pretty(parsed).map_err(|err| { Error::from(EncodingError::Serde(err.to_string())) }) }; - println!("Transaction accepted with result: {}", tx_to_str(&parsed)?); + display_line!( + IO, + "Transaction accepted with result: {}", + tx_to_str(&parsed)? + ); // The transaction is now on chain. 
We wait for it to be decrypted // and applied if parsed.code == 0.to_string() { @@ -400,11 +413,16 @@ where // payload makes its way onto the blockchain let decrypted_query = rpc::TxEventQuery::Applied(decrypted_hash.as_str()); - let event = rpc::query_tx_status(client, decrypted_query, deadline) - .await - .proceed_or(TxError::AppliedTimeout)?; + let event = rpc::query_tx_status::<_, IO>( + client, + decrypted_query, + deadline, + ) + .await + .proceed_or(TxError::AppliedTimeout)?; let parsed = TxResponse::from_event(event); - println!( + display_line!( + IO, "Transaction applied with result: {}", tx_to_str(&parsed)? ); @@ -441,7 +459,7 @@ pub fn decode_component( } /// Save accounts initialized from a tx into the wallet, if any. -pub async fn save_initialized_accounts( +pub async fn save_initialized_accounts( wallet: &mut Wallet, args: &args::Tx, initialized_accounts: Vec
, @@ -449,7 +467,8 @@ pub async fn save_initialized_accounts( let len = initialized_accounts.len(); if len != 0 { // Store newly initialized account addresses in the wallet - println!( + display_line!( + IO, "The transaction initialized {} new account{}", len, if len == 1 { "" } else { "s" } @@ -480,12 +499,16 @@ pub async fn save_initialized_accounts( ); match added { Some(new_alias) if new_alias != encoded => { - println!( + display_line!( + IO, "Added alias {} for address {}.", - new_alias, encoded + new_alias, + encoded ); } - _ => println!("No alias added for address {}.", encoded), + _ => { + display_line!(IO, "No alias added for address {}.", encoded) + } }; } } @@ -493,9 +516,10 @@ pub async fn save_initialized_accounts( /// Submit validator comission rate change pub async fn build_validator_commission_change< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -515,7 +539,11 @@ pub async fn build_validator_commission_change< let validator = validator.clone(); if rpc::is_validator(client, &validator).await? { if rate < Dec::zero() || rate > Dec::one() { - eprintln!("Invalid new commission rate, received {}", rate); + edisplay_line!( + IO, + "Invalid new commission rate, received {}", + rate + ); return Err(Error::from(TxError::InvalidCommissionRate(rate))); } @@ -535,7 +563,8 @@ pub async fn build_validator_commission_change< if rate.abs_diff(&commission_rate) > max_commission_change_per_epoch { - eprintln!( + edisplay_line!( + IO, "New rate is too large of a change with respect to \ the predecessor epoch in which the rate will take \ effect." 
@@ -548,14 +577,14 @@ pub async fn build_validator_commission_change< } } None => { - eprintln!("Error retrieving from storage"); + edisplay_line!(IO, "Error retrieving from storage"); if !tx_args.force { return Err(Error::from(TxError::Retrieval)); } } } } else { - eprintln!("The given address {validator} is not a validator."); + edisplay_line!(IO, "The given address {validator} is not a validator."); if !tx_args.force { return Err(Error::from(TxError::InvalidValidatorAddress( validator, @@ -568,7 +597,7 @@ pub async fn build_validator_commission_change< new_rate: rate, }; - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -584,9 +613,10 @@ pub async fn build_validator_commission_change< /// Craft transaction to update a steward commission pub async fn build_update_steward_commission< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -600,7 +630,7 @@ pub async fn build_update_steward_commission< gas_payer: &common::PublicKey, ) -> Result<(Tx, Option)> { if !rpc::is_steward(client, &steward).await && !tx_args.force { - eprintln!("The given address {} is not a steward.", &steward); + edisplay_line!(IO, "The given address {} is not a steward.", &steward); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); }; @@ -608,7 +638,10 @@ pub async fn build_update_steward_commission< .map_err(|e| TxError::InvalidStewardCommission(e.to_string()))?; if !commission.is_valid() && !tx_args.force { - eprintln!("The sum of all percentage must not be greater than 1."); + edisplay_line!( + IO, + "The sum of all percentage must not be greater than 1." 
+ ); return Err(Error::from(TxError::InvalidStewardCommission( "Commission sum is greater than 1.".to_string(), ))); @@ -619,7 +652,7 @@ pub async fn build_update_steward_commission< commission: commission.reward_distribution, }; - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -635,9 +668,10 @@ pub async fn build_update_steward_commission< /// Craft transaction to resign as a steward pub async fn build_resign_steward< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -650,11 +684,11 @@ pub async fn build_resign_steward< gas_payer: &common::PublicKey, ) -> Result<(Tx, Option)> { if !rpc::is_steward(client, &steward).await && !tx_args.force { - eprintln!("The given address {} is not a steward.", &steward); + edisplay_line!(IO, "The given address {} is not a steward.", &steward); return Err(Error::from(TxError::InvalidSteward(steward.clone()))); }; - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -670,9 +704,10 @@ pub async fn build_resign_steward< /// Submit transaction to unjail a jailed validator pub async fn build_unjail_validator< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -685,7 +720,11 @@ pub async fn build_unjail_validator< fee_payer: common::PublicKey, ) -> Result<(Tx, Option)> { if !rpc::is_validator(client, &validator).await? 
{ - eprintln!("The given address {} is not a validator.", &validator); + edisplay_line!( + IO, + "The given address {} is not a validator.", + &validator + ); if !tx_args.force { return Err(Error::from(TxError::InvalidValidatorAddress( validator.clone(), @@ -706,7 +745,8 @@ pub async fn build_unjail_validator< )) })?; if validator_state_at_pipeline != ValidatorState::Jailed { - eprintln!( + edisplay_line!( + IO, "The given validator address {} is not jailed at the pipeline \ epoch when it would be restored to one of the validator sets.", &validator @@ -728,7 +768,8 @@ pub async fn build_unjail_validator< let eligible_epoch = last_slash_epoch + params.slash_processing_epoch_offset(); if current_epoch < eligible_epoch { - eprintln!( + edisplay_line!( + IO, "The given validator address {} is currently frozen and \ not yet eligible to be unjailed.", &validator @@ -754,7 +795,7 @@ pub async fn build_unjail_validator< Err(err) => return Err(err), } - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -770,9 +811,10 @@ pub async fn build_unjail_validator< /// Submit transaction to withdraw an unbond pub async fn build_withdraw< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -787,9 +829,12 @@ pub async fn build_withdraw< ) -> Result<(Tx, Option)> { let epoch = rpc::query_epoch(client).await?; - let validator = - known_validator_or_err(validator.clone(), tx_args.force, client) - .await?; + let validator = known_validator_or_err::<_, IO>( + validator.clone(), + tx_args.force, + client, + ) + .await?; let source = source.clone(); @@ -804,26 +849,29 @@ pub async fn build_withdraw< .await?; if tokens.is_zero() { - eprintln!( + edisplay_line!( + IO, "There are no unbonded bonds ready to withdraw in the current \ epoch {}.", epoch ); - rpc::query_and_print_unbonds(client, &bond_source, &validator).await?; + rpc::query_and_print_unbonds::<_, 
IO>(client, &bond_source, &validator) + .await?; if !tx_args.force { return Err(Error::from(TxError::NoUnbondReady(epoch))); } } else { - println!( + display_line!( + IO, "Found {} tokens that can be withdrawn.", tokens.to_string_native() ); - println!("Submitting transaction to withdraw them..."); + display_line!(IO, "Submitting transaction to withdraw them..."); } let data = pos::Withdraw { validator, source }; - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -839,9 +887,10 @@ pub async fn build_withdraw< /// Submit a transaction to unbond pub async fn build_unbond< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -860,18 +909,24 @@ pub async fn build_unbond< let bond_source = source.clone().unwrap_or_else(|| validator.clone()); if !tx_args.force { - known_validator_or_err(validator.clone(), tx_args.force, client) - .await?; + known_validator_or_err::<_, IO>( + validator.clone(), + tx_args.force, + client, + ) + .await?; let bond_amount = rpc::query_bond(client, &bond_source, &validator, None).await?; - println!( + display_line!( + IO, "Bond amount available for unbonding: {} NAM", bond_amount.to_string_native() ); if amount > bond_amount { - eprintln!( + edisplay_line!( + IO, "The total bonds of the source {} is lower than the amount to \ be unbonded. 
Amount to unbond is {} and the total bonds is \ {}.", @@ -906,7 +961,7 @@ pub async fn build_unbond< source: source.clone(), }; - let (tx, epoch) = build( + let (tx, epoch) = build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -922,7 +977,7 @@ pub async fn build_unbond< } /// Query the unbonds post-tx -pub async fn query_unbonds( +pub async fn query_unbonds( client: &C, args: args::Unbond, latest_withdrawal_pre: Option<(Epoch, token::Amount)>, @@ -951,7 +1006,8 @@ pub async fn query_unbonds( match latest_withdraw_epoch_post.cmp(&latest_withdraw_epoch_pre) { std::cmp::Ordering::Less => { if args.tx.force { - eprintln!( + edisplay_line!( + IO, "Unexpected behavior reading the unbonds data has \ occurred" ); @@ -960,7 +1016,8 @@ pub async fn query_unbonds( } } std::cmp::Ordering::Equal => { - println!( + display_line!( + IO, "Amount {} withdrawable starting from epoch {}", (latest_withdraw_amount_post - latest_withdraw_amount_pre) .to_string_native(), @@ -968,7 +1025,8 @@ pub async fn query_unbonds( ); } std::cmp::Ordering::Greater => { - println!( + display_line!( + IO, "Amount {} withdrawable starting from epoch {}", latest_withdraw_amount_post.to_string_native(), latest_withdraw_epoch_post, @@ -976,7 +1034,8 @@ pub async fn query_unbonds( } } } else { - println!( + display_line!( + IO, "Amount {} withdrawable starting from epoch {}", latest_withdraw_amount_post.to_string_native(), latest_withdraw_epoch_post, @@ -987,9 +1046,10 @@ pub async fn query_unbonds( /// Submit a transaction to bond pub async fn build_bond< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -1004,15 +1064,20 @@ pub async fn build_bond< }: args::Bond, fee_payer: common::PublicKey, ) -> Result<(Tx, Option)> { - let validator = - known_validator_or_err(validator.clone(), tx_args.force, client) - .await?; + let validator = known_validator_or_err::<_, IO>( + validator.clone(), 
+ tx_args.force, + client, + ) + .await?; // Check that the source address exists on chain let source = match source.clone() { - Some(source) => source_exists_or_err(source, tx_args.force, client) - .await - .map(Some), + Some(source) => { + source_exists_or_err::<_, IO>(source, tx_args.force, client) + .await + .map(Some) + } None => Ok(source.clone()), }?; // Check bond's source (source for delegation or validator for self-bonds) @@ -1021,7 +1086,7 @@ pub async fn build_bond< let balance_key = token::balance_key(&native_token, bond_source); // TODO Should we state the same error message for the native token? - let post_balance = check_balance_too_low_err( + let post_balance = check_balance_too_low_err::<_, IO>( &native_token, bond_source, amount, @@ -1042,7 +1107,7 @@ pub async fn build_bond< source, }; - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -1058,9 +1123,10 @@ pub async fn build_bond< /// Build a default proposal governance pub async fn build_default_proposal< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -1094,7 +1160,7 @@ pub async fn build_default_proposal< }; Ok(()) }; - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -1110,9 +1176,10 @@ pub async fn build_default_proposal< /// Build a proposal vote pub async fn build_vote_proposal< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -1176,7 +1243,7 @@ pub async fn build_vote_proposal< delegations, }; - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -1192,9 +1259,10 @@ pub async fn build_vote_proposal< /// Build a pgf funding proposal governance pub async fn build_pgf_funding_proposal< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: 
Io, >( client: &C, wallet: &mut Wallet, @@ -1220,7 +1288,7 @@ pub async fn build_pgf_funding_proposal< data.content = extra_section_hash; Ok(()) }; - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -1236,9 +1304,10 @@ pub async fn build_pgf_funding_proposal< /// Build a pgf funding proposal governance pub async fn build_pgf_stewards_proposal< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -1265,7 +1334,7 @@ pub async fn build_pgf_stewards_proposal< Ok(()) }; - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -1281,9 +1350,10 @@ pub async fn build_pgf_stewards_proposal< /// Submit an IBC transfer pub async fn build_ibc_transfer< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -1292,16 +1362,22 @@ pub async fn build_ibc_transfer< fee_payer: common::PublicKey, ) -> Result<(Tx, Option)> { // Check that the source address exists on chain - let source = - source_exists_or_err(args.source.clone(), args.tx.force, client) - .await?; + let source = source_exists_or_err::<_, IO>( + args.source.clone(), + args.tx.force, + client, + ) + .await?; // We cannot check the receiver // validate the amount given - let validated_amount = - validate_amount(client, args.amount, &args.token, args.tx.force) - .await - .expect("expected to validate amount"); + let validated_amount = validate_amount::<_, IO>( + client, + args.amount, + &args.token, + args.tx.force, + ) + .await?; if validated_amount.canonical().denom.0 != 0 { return Err(Error::Other(format!( "The amount for the IBC transfer should be an integer: {}", @@ -1312,7 +1388,7 @@ pub async fn build_ibc_transfer< // Check source balance let balance_key = token::balance_key(&args.token, &source); - let post_balance = check_balance_too_low_err( + let post_balance = 
check_balance_too_low_err::<_, IO>( &args.token, &source, validated_amount.amount, @@ -1327,10 +1403,12 @@ pub async fn build_ibc_transfer< token: args.token.clone(), }); - let tx_code_hash = - query_wasm_code_hash(client, args.tx_code_path.to_str().unwrap()) - .await - .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let tx_code_hash = query_wasm_code_hash::<_, IO>( + client, + args.tx_code_path.to_str().unwrap(), + ) + .await + .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; let ibc_denom = match &args.token { Address::Internal(InternalAddress::IbcToken(hash)) => { @@ -1398,7 +1476,7 @@ pub async fn build_ibc_transfer< tx.add_code_from_hash(tx_code_hash) .add_serialized_data(data); - let epoch = prepare_tx::( + let epoch = prepare_tx::( client, wallet, shielded, @@ -1406,8 +1484,6 @@ pub async fn build_ibc_transfer< &mut tx, fee_payer, tx_source_balance, - #[cfg(not(feature = "mainnet"))] - false, ) .await?; @@ -1416,11 +1492,11 @@ pub async fn build_ibc_transfer< /// Abstraction for helping build transactions #[allow(clippy::too_many_arguments)] -pub async fn build( +pub async fn build( client: &C, wallet: &mut Wallet, shielded: &mut ShieldedContext, - tx_args: &crate::ledger::args::Tx, + tx_args: &crate::sdk::args::Tx, path: PathBuf, data: D, on_tx: F, @@ -1432,8 +1508,9 @@ where D: BorshSerialize, U: WalletUtils, V: ShieldedUtils, + IO: Io, { - build_pow_flag( + build_pow_flag::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -1443,24 +1520,28 @@ where on_tx, gas_payer, tx_source_balance, - #[cfg(not(feature = "mainnet"))] - false, ) .await } #[allow(clippy::too_many_arguments)] -async fn build_pow_flag( +async fn build_pow_flag< + C: crate::ledger::queries::Client + Sync, + U, + V, + F, + D, + IO: Io, +>( client: &C, wallet: &mut Wallet, shielded: &mut ShieldedContext, - tx_args: &crate::ledger::args::Tx, + tx_args: &crate::sdk::args::Tx, path: PathBuf, mut data: D, on_tx: F, gas_payer: &common::PublicKey, tx_source_balance: 
Option, - #[cfg(not(feature = "mainnet"))] requires_pow: bool, ) -> Result<(Tx, Option)> where F: FnOnce(&mut Tx, &mut D) -> Result<()>, @@ -1472,15 +1553,16 @@ where let mut tx_builder = Tx::new(chain_id, tx_args.expiration); - let tx_code_hash = query_wasm_code_hash(client, path.to_string_lossy()) - .await - .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + let tx_code_hash = + query_wasm_code_hash::<_, IO>(client, path.to_string_lossy()) + .await + .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; on_tx(&mut tx_builder, &mut data)?; tx_builder.add_code_from_hash(tx_code_hash).add_data(data); - let epoch = prepare_tx::( + let epoch = prepare_tx::( client, wallet, shielded, @@ -1488,8 +1570,6 @@ where &mut tx_builder, gas_payer.clone(), tx_source_balance, - #[cfg(not(feature = "mainnet"))] - requires_pow, ) .await?; Ok((tx_builder, epoch)) @@ -1498,7 +1578,7 @@ where /// Try to decode the given asset type and add its decoding to the supplied set. /// Returns true only if a new decoding has been added to the given set. async fn add_asset_type< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: ShieldedUtils, >( asset_types: &mut HashSet<(Address, MaspDenom, Epoch)>, @@ -1519,7 +1599,7 @@ async fn add_asset_type< /// function provides the data necessary for offline wallets to present asset /// type information. 
async fn used_asset_types< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: ShieldedUtils, P, R, @@ -1570,9 +1650,10 @@ async fn used_asset_types< /// Submit an ordinary transfer pub async fn build_transfer< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -1585,18 +1666,21 @@ pub async fn build_transfer< let token = args.token.clone(); // Check that the source address exists on chain - source_exists_or_err(source.clone(), args.tx.force, client).await?; + source_exists_or_err::<_, IO>(source.clone(), args.tx.force, client) + .await?; // Check that the target address exists on chain - target_exists_or_err(target.clone(), args.tx.force, client).await?; + target_exists_or_err::<_, IO>(target.clone(), args.tx.force, client) + .await?; // Check source balance let balance_key = token::balance_key(&token, &source); // validate the amount given let validated_amount = - validate_amount(client, args.amount, &token, args.tx.force).await?; + validate_amount::<_, IO>(client, args.amount, &token, args.tx.force) + .await?; args.amount = InputAmount::Validated(validated_amount); - let post_balance = check_balance_too_low_err::( + let post_balance = check_balance_too_low_err::( &token, &source, validated_amount.amount, @@ -1629,13 +1713,10 @@ pub async fn build_transfer< _ => None, }; - #[cfg(not(feature = "mainnet"))] - let is_source_faucet = rpc::is_faucet_account(client, &source).await; - #[cfg(feature = "mainnet")] - let is_source_faucet = false; - // Construct the shielded part of the transaction, if any - let stx_result = shielded.gen_shielded_transfer(client, args.clone()).await; + let stx_result = shielded + .gen_shielded_transfer::<_, IO>(client, args.clone()) + .await; let shielded_parts = match stx_result { Ok(stx) => Ok(stx), @@ -1703,7 +1784,7 @@ pub async fn build_transfer< }; Ok(()) }; - let (tx, 
unshielding_epoch) = build_pow_flag( + let (tx, unshielding_epoch) = build_pow_flag::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -1713,8 +1794,6 @@ pub async fn build_transfer< add_shielded, &fee_payer, tx_source_balance, - #[cfg(not(feature = "mainnet"))] - is_source_faucet, ) .await?; // Manage the two masp epochs @@ -1741,9 +1820,10 @@ pub async fn build_transfer< /// Submit a transaction to initialize an account pub async fn build_init_account< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -1757,7 +1837,8 @@ pub async fn build_init_account< }: args::TxInitAccount, fee_payer: &common::PublicKey, ) -> Result<(Tx, Option)> { - let vp_code_hash = query_wasm_code_hash_buf(client, &vp_code_path).await?; + let vp_code_hash = + query_wasm_code_hash_buf::<_, IO>(client, &vp_code_path).await?; let threshold = match threshold { Some(threshold) => threshold, @@ -1782,7 +1863,7 @@ pub async fn build_init_account< data.vp_code_hash = extra_section_hash; Ok(()) }; - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -1798,9 +1879,10 @@ pub async fn build_init_account< /// Submit a transaction to update a VP pub async fn build_update_account< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -1826,7 +1908,8 @@ pub async fn build_update_account< let vp_code_hash = match vp_code_path { Some(code_path) => { - let vp_hash = query_wasm_code_hash_buf(client, &code_path).await?; + let vp_hash = + query_wasm_code_hash_buf::<_, IO>(client, &code_path).await?; Some(vp_hash) } None => None, @@ -1850,7 +1933,7 @@ pub async fn build_update_account< data.vp_code_hash = extra_section_hash; Ok(()) }; - build( + build::<_, _, _, _, _, IO>( client, wallet, shielded, @@ -1866,9 +1949,10 @@ pub async fn build_update_account< /// Submit a 
custom transaction pub async fn build_custom< - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, U: WalletUtils, V: ShieldedUtils, + IO: Io, >( client: &C, wallet: &mut Wallet, @@ -1887,7 +1971,7 @@ pub async fn build_custom< Error::Other("Invalid tx deserialization.".to_string()) })? } else { - let tx_code_hash = query_wasm_code_hash_buf( + let tx_code_hash = query_wasm_code_hash_buf::<_, IO>( client, &code_path .ok_or(Error::Other("No code path supplied".to_string()))?, @@ -1900,7 +1984,7 @@ pub async fn build_custom< tx }; - let epoch = prepare_tx::( + let epoch = prepare_tx::( client, wallet, shielded, @@ -1908,21 +1992,22 @@ pub async fn build_custom< &mut tx, fee_payer.clone(), None, - #[cfg(not(feature = "mainnet"))] - false, ) .await?; Ok((tx, epoch)) } -async fn expect_dry_broadcast( +async fn expect_dry_broadcast< + C: crate::ledger::queries::Client + Sync, + IO: Io, +>( to_broadcast: TxBroadcastData, client: &C, ) -> Result { match to_broadcast { TxBroadcastData::DryRun(tx) => { - rpc::dry_run_tx(client, tx.to_bytes()).await?; + rpc::dry_run_tx::<_, IO>(client, tx.to_bytes()).await?; Ok(ProcessTxResponse::DryRun) } TxBroadcastData::Live { @@ -1940,7 +2025,10 @@ fn lift_rpc_error(res: std::result::Result) -> Result { /// Returns the given validator if the given address is a validator, /// otherwise returns an error, force forces the address through even /// if it isn't a validator -async fn known_validator_or_err( +async fn known_validator_or_err< + C: crate::ledger::queries::Client + Sync, + IO: Io, +>( validator: Address, force: bool, client: &C, @@ -1949,7 +2037,8 @@ async fn known_validator_or_err( let is_validator = rpc::is_validator(client, &validator).await?; if !is_validator { if force { - eprintln!( + edisplay_line!( + IO, "The address {} doesn't belong to any known validator account.", validator ); @@ -1965,7 +2054,7 @@ async fn known_validator_or_err( /// general pattern for checking if an address exists on the 
chain, or /// throwing an error if it's not forced. Takes a generic error /// message and the error type. -async fn address_exists_or_err( +async fn address_exists_or_err( addr: Address, force: bool, client: &C, @@ -1973,13 +2062,13 @@ async fn address_exists_or_err( err: F, ) -> Result
where - C: crate::ledger::queries::Client + Sync, + C: crate::sdk::queries::Client + Sync, F: FnOnce(Address) -> Error, { let addr_exists = rpc::known_address::(client, &addr).await?; if !addr_exists { if force { - eprintln!("{}", message); + edisplay_line!(IO, "{}", message); Ok(addr) } else { Err(err(addr)) @@ -1992,14 +2081,17 @@ where /// Returns the given source address if the given address exists on chain /// otherwise returns an error, force forces the address through even /// if it isn't on chain -async fn source_exists_or_err( +async fn source_exists_or_err< + C: crate::ledger::queries::Client + Sync, + IO: Io, +>( token: Address, force: bool, client: &C, ) -> Result
{ let message = format!("The source address {} doesn't exist on chain.", token); - address_exists_or_err(token, force, client, message, |err| { + address_exists_or_err::<_, _, IO>(token, force, client, message, |err| { Error::from(TxError::SourceDoesNotExist(err)) }) .await @@ -2008,14 +2100,17 @@ async fn source_exists_or_err( /// Returns the given target address if the given address exists on chain /// otherwise returns an error, force forces the address through even /// if it isn't on chain -async fn target_exists_or_err( +async fn target_exists_or_err< + C: crate::ledger::queries::Client + Sync, + IO: Io, +>( token: Address, force: bool, client: &C, ) -> Result
{ let message = format!("The target address {} doesn't exist on chain.", token); - address_exists_or_err(token, force, client, message, |err| { + address_exists_or_err::<_, _, IO>(token, force, client, message, |err| { Error::from(TxError::TargetLocationDoesNotExist(err)) }) .await @@ -2024,7 +2119,10 @@ async fn target_exists_or_err( /// Checks the balance at the given address is enough to transfer the /// given amount, along with the balance even existing. Force /// overrides this. Returns the updated balance for fee check if necessary -async fn check_balance_too_low_err( +async fn check_balance_too_low_err< + C: crate::ledger::queries::Client + Sync, + IO: Io, +>( token: &Address, source: &Address, amount: token::Amount, @@ -2039,14 +2137,21 @@ async fn check_balance_too_low_err( Some(diff) => Ok(diff), None => { if force { - eprintln!( + edisplay_line!( + IO, "The balance of the source {} of token {} is lower \ than the amount to be transferred. Amount to \ transfer is {} and the balance is {}.", source, token, - format_denominated_amount(client, token, amount).await, - format_denominated_amount(client, token, balance).await, + format_denominated_amount::<_, IO>( + client, token, amount + ) + .await, + format_denominated_amount::<_, IO>( + client, token, balance + ) + .await, ); Ok(token::Amount::default()) } else { @@ -2063,9 +2168,11 @@ async fn check_balance_too_low_err( QueryError::General(_) | QueryError::NoSuchKey(_), )) => { if force { - eprintln!( + edisplay_line!( + IO, "No balance found for the source {} of token {}", - source, token + source, + token ); Ok(token::Amount::default()) } else { @@ -2082,10 +2189,17 @@ async fn check_balance_too_low_err( } #[allow(dead_code)] -fn validate_untrusted_code_err(vp_code: &Vec, force: bool) -> Result<()> { +fn validate_untrusted_code_err( + vp_code: &Vec, + force: bool, +) -> Result<()> { if let Err(err) = vm::validate_untrusted_wasm(vp_code) { if force { - eprintln!("Validity predicate code validation failed 
with {}", err); + edisplay_line!( + IO, + "Validity predicate code validation failed with {}", + err + ); Ok(()) } else { Err(Error::from(TxError::WasmValidationFailure(err))) @@ -2094,11 +2208,14 @@ fn validate_untrusted_code_err(vp_code: &Vec, force: bool) -> Result<()> { Ok(()) } } -async fn query_wasm_code_hash_buf( +async fn query_wasm_code_hash_buf< + C: crate::ledger::queries::Client + Sync, + IO: Io, +>( client: &C, path: &Path, ) -> Result { - query_wasm_code_hash(client, path.to_string_lossy()).await + query_wasm_code_hash::<_, IO>(client, path.to_string_lossy()).await } /// A helper for [`fn build`] that can be used for `on_tx` arg that does nothing diff --git a/shared/src/ledger/wallet/alias.rs b/shared/src/sdk/wallet/alias.rs similarity index 100% rename from shared/src/ledger/wallet/alias.rs rename to shared/src/sdk/wallet/alias.rs diff --git a/shared/src/ledger/wallet/derivation_path.rs b/shared/src/sdk/wallet/derivation_path.rs similarity index 100% rename from shared/src/ledger/wallet/derivation_path.rs rename to shared/src/sdk/wallet/derivation_path.rs diff --git a/shared/src/ledger/wallet/keys.rs b/shared/src/sdk/wallet/keys.rs similarity index 99% rename from shared/src/ledger/wallet/keys.rs rename to shared/src/sdk/wallet/keys.rs index d13ebbd07c2..867a2b1ad0c 100644 --- a/shared/src/ledger/wallet/keys.rs +++ b/shared/src/sdk/wallet/keys.rs @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use zeroize::Zeroizing; -use crate::ledger::wallet::WalletUtils; +use crate::sdk::wallet::WalletUtils; const ENCRYPTED_KEY_PREFIX: &str = "encrypted:"; const UNENCRYPTED_KEY_PREFIX: &str = "unencrypted:"; diff --git a/shared/src/ledger/wallet/mod.rs b/shared/src/sdk/wallet/mod.rs similarity index 100% rename from shared/src/ledger/wallet/mod.rs rename to shared/src/sdk/wallet/mod.rs diff --git a/shared/src/ledger/wallet/pre_genesis.rs b/shared/src/sdk/wallet/pre_genesis.rs similarity index 97% rename from 
shared/src/ledger/wallet/pre_genesis.rs rename to shared/src/sdk/wallet/pre_genesis.rs index dc19c8bfe54..fd66dedbfec 100644 --- a/shared/src/ledger/wallet/pre_genesis.rs +++ b/shared/src/sdk/wallet/pre_genesis.rs @@ -3,8 +3,8 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use zeroize::Zeroizing; -use crate::ledger::wallet; -use crate::ledger::wallet::{store, StoredKeypair}; +use crate::sdk::wallet; +use crate::sdk::wallet::{store, StoredKeypair}; use crate::types::key::{common, SchemeType}; /// Ways in which wallet store operations can fail diff --git a/shared/src/ledger/wallet/store.rs b/shared/src/sdk/wallet/store.rs similarity index 99% rename from shared/src/ledger/wallet/store.rs rename to shared/src/sdk/wallet/store.rs index 09e04ed836f..509ff5afe68 100644 --- a/shared/src/ledger/wallet/store.rs +++ b/shared/src/sdk/wallet/store.rs @@ -17,7 +17,7 @@ use zeroize::Zeroizing; use super::alias::{self, Alias}; use super::derivation_path::DerivationPath; use super::pre_genesis; -use crate::ledger::wallet::{StoredKeypair, WalletUtils}; +use crate::sdk::wallet::{StoredKeypair, WalletUtils}; use crate::types::address::{Address, ImplicitAddress}; use crate::types::key::dkg_session_keys::DkgKeypair; use crate::types::key::*; diff --git a/shared/src/types/io.rs b/shared/src/types/io.rs new file mode 100644 index 00000000000..462dbef95f0 --- /dev/null +++ b/shared/src/types/io.rs @@ -0,0 +1,156 @@ +//! Traits for implementing IO handlers. This is to enable +//! generic IO. The defaults are the obvious Rust native +//! functions. + +/// Rust native I/O handling. 
+pub struct DefaultIo; + +#[async_trait::async_trait(?Send)] +impl Io for DefaultIo {} + +#[async_trait::async_trait(?Send)] +#[allow(missing_docs)] +pub trait Io { + fn print(output: impl AsRef) { + print!("{}", output.as_ref()); + } + + fn flush() { + use std::io::Write; + std::io::stdout().flush().unwrap(); + } + + fn println(output: impl AsRef) { + println!("{}", output.as_ref()); + } + + fn write( + mut writer: W, + output: impl AsRef, + ) -> std::io::Result<()> { + write!(writer, "{}", output.as_ref()) + } + + fn writeln( + mut writer: W, + output: impl AsRef, + ) -> std::io::Result<()> { + writeln!(writer, "{}", output.as_ref()) + } + + fn eprintln(output: impl AsRef) { + eprintln!("{}", output.as_ref()); + } + + async fn read() -> std::io::Result { + #[cfg(not(target_family = "wasm"))] + { + read_aux(tokio::io::stdin()).await + } + #[cfg(target_family = "wasm")] + { + unreachable!("Wasm should not perform general IO") + } + } + + async fn prompt(question: impl AsRef) -> String { + #[cfg(not(target_family = "wasm"))] + { + prompt_aux( + tokio::io::stdin(), + tokio::io::stdout(), + question.as_ref(), + ) + .await + } + #[cfg(target_family = "wasm")] + { + unreachable!( + "Wasm should not perform general IO; received call for input \ + with question\n: {}", + question.as_ref() + ) + } + } +} + +/// A generic function for displaying a prompt to users and reading +/// in their response. 
+#[cfg(not(target_family = "wasm"))] +pub async fn prompt_aux( + mut reader: R, + mut writer: W, + question: &str, +) -> String +where + R: tokio::io::AsyncReadExt + Unpin, + W: tokio::io::AsyncWriteExt + Unpin, +{ + writer + .write_all(question.as_bytes()) + .await + .expect("Unable to write"); + writer.flush().await.unwrap(); + let mut s = String::new(); + reader.read_to_string(&mut s).await.expect("Unable to read"); + s +} + +/// A generic function for reading input from users +#[cfg(not(target_family = "wasm"))] +pub async fn read_aux(mut reader: R) -> tokio::io::Result +where + R: tokio::io::AsyncReadExt + Unpin, +{ + let mut s = String::new(); + reader.read_to_string(&mut s).await?; + Ok(s) +} + +/// Convenience macro for formatting arguments to +/// [`Io::print`] +#[macro_export] +macro_rules! display { + ($io:ty) => { + <$io>::print("") + }; + ($io:ty, $w:expr; $($args:tt)*) => { + <$io>::write($w, format_args!($($args)*).to_string()) + }; + ($io:ty,$($args:tt)*) => { + <$io>::print(format_args!($($args)*).to_string()) + }; +} + +/// Convenience macro for formatting arguments to +/// [`Io::println`] and [`Io::writeln`] +#[macro_export] +macro_rules! display_line { + ($io:ty) => { + <$io>::println("") + }; + ($io:ty, $w:expr; $($args:tt)*) => { + <$io>::writeln($w, format_args!($($args)*).to_string()) + }; + ($io:ty,$($args:tt)*) => { + <$io>::println(format_args!($($args)*).to_string()) + }; +} + +/// Convenience macro for formatting arguments to +/// [`Io::eprintln`] +#[macro_export] +macro_rules! edisplay_line { + ($io:ty,$($args:tt)*) => { + <$io>::eprintln(format_args!($($args)*).to_string()) + }; +} + +#[macro_export] +/// A convenience macro for formatting the user prompt before +/// forwarding it to the [`Io::prompt`] method. +macro_rules! 
prompt { + ($io:ty,$($arg:tt)*) => {{ + <$io>::prompt(format!("{}", format_args!($($arg)*))) + }} +} diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index 026295d7288..83e58f0fa84 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -1,8 +1,8 @@ //! Types definitions. pub mod control_flow; -pub mod error; pub mod ibc; +pub mod io; pub mod key; pub use namada_core::types::{ diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index 179472a8906..ac35b837d42 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -268,10 +268,6 @@ where /// To avoid unused parameter without "wasm-runtime" feature #[cfg(not(feature = "wasm-runtime"))] pub cache_access: std::marker::PhantomData, - #[cfg(not(feature = "mainnet"))] - /// This is true when the wrapper of this tx contained a valid - /// `testnet_pow::Solution` - has_valid_pow: bool, } /// A Validity predicate runner for calls from the [`vp_eval`] function. @@ -328,7 +324,6 @@ where keys_changed: &BTreeSet, eval_runner: &EVAL, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, - #[cfg(not(feature = "mainnet"))] has_valid_pow: bool, ) -> Self { let ctx = VpCtx::new( address, @@ -344,8 +339,6 @@ where eval_runner, #[cfg(feature = "wasm-runtime")] vp_wasm_cache, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, ); Self { memory, ctx } @@ -396,7 +389,6 @@ where keys_changed: &BTreeSet, eval_runner: &EVAL, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, - #[cfg(not(feature = "mainnet"))] has_valid_pow: bool, ) -> Self { let address = unsafe { HostRef::new(address) }; let storage = unsafe { HostRef::new(storage) }; @@ -427,8 +419,6 @@ where vp_wasm_cache, #[cfg(not(feature = "wasm-runtime"))] cache_access: std::marker::PhantomData, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, } } } @@ -457,8 +447,6 @@ where vp_wasm_cache: self.vp_wasm_cache.clone(), #[cfg(not(feature = "wasm-runtime"))] cache_access: std::marker::PhantomData, - 
#[cfg(not(feature = "mainnet"))] - has_valid_pow: self.has_valid_pow, } } } @@ -1796,6 +1784,8 @@ pub fn vp_verify_tx_section_signature( hash_list_len: u64, public_keys_map_ptr: u64, public_keys_map_len: u64, + signer_ptr: u64, + signer_len: u64, threshold: u8, max_signatures_ptr: u64, max_signatures_len: u64, @@ -1828,6 +1818,14 @@ where ) .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let (signer, gas) = env + .memory + .read_bytes(signer_ptr, signer_len as _) + .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + vp_host_fns::add_gas(gas_meter, gas)?; + let signer = Address::try_from_slice(&signer) + .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let (max_signatures, gas) = env .memory .read_bytes(max_signatures_ptr, max_signatures_len as _) @@ -1839,12 +1837,13 @@ where let tx = unsafe { env.ctx.tx.get() }; Ok(HostEnvResult::from( - tx.verify_section_signatures( + tx.verify_signatures( &hashes, public_keys_map, + &Some(signer), threshold, max_signatures, - gas_meter, + Some(gas_meter), ) .is_ok(), ) @@ -1881,7 +1880,7 @@ where // TODO: once the runtime gas meter is implemented we need to benchmark // this funcion and charge the gas here. 
For the moment, the cost of // this is included in the benchmark of the masp vp - HostEnvResult::from(crate::ledger::masp::verify_shielded_tx(&shielded)) + HostEnvResult::from(crate::sdk::masp::verify_shielded_tx(&shielded)) .to_i64(), ) } @@ -2005,33 +2004,6 @@ where vp_host_fns::add_gas(gas_meter, gas) } -/// Find if the wrapper tx had a valid `testnet_pow::Solution` -pub fn vp_has_valid_pow( - env: &VpVmEnv, -) -> vp_host_fns::EnvResult -where - MEM: VmMemory, - DB: storage::DB + for<'iter> storage::DBIter<'iter>, - H: StorageHasher, - EVAL: VpEvaluator, - CA: WasmCacheAccess, -{ - #[cfg(feature = "mainnet")] - let _ = env; - - #[cfg(not(feature = "mainnet"))] - let has_valid_pow = env.ctx.has_valid_pow; - #[cfg(feature = "mainnet")] - let has_valid_pow = false; - - Ok(if has_valid_pow { - HostEnvResult::Success - } else { - HostEnvResult::Fail - } - .to_i64()) -} - /// Log a string from exposed to the wasm VM VP environment. The message will be /// printed at the [`tracing::Level::INFO`]. This function is for development /// only. 
@@ -2116,7 +2088,6 @@ pub mod testing { keys_changed: &BTreeSet, eval_runner: &EVAL, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, - #[cfg(not(feature = "mainnet"))] has_valid_pow: bool, ) -> VpVmEnv<'static, NativeMemory, DB, H, EVAL, CA> where DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, @@ -2139,8 +2110,6 @@ pub mod testing { eval_runner, #[cfg(feature = "wasm-runtime")] vp_wasm_cache, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, ) } } diff --git a/shared/src/vm/wasm/host_env.rs b/shared/src/vm/wasm/host_env.rs index f31832b9bd0..30e58f38d7b 100644 --- a/shared/src/vm/wasm/host_env.rs +++ b/shared/src/vm/wasm/host_env.rs @@ -130,7 +130,6 @@ where "namada_vp_verify_masp" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_verify_masp), "namada_vp_eval" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_eval), "namada_vp_get_native_token" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_native_token), - "namada_vp_has_valid_pow" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_has_valid_pow), "namada_vp_log_string" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_log_string), }, } diff --git a/shared/src/vm/wasm/run.rs b/shared/src/vm/wasm/run.rs index 667b6884ee1..7678ceb434f 100644 --- a/shared/src/vm/wasm/run.rs +++ b/shared/src/vm/wasm/run.rs @@ -86,11 +86,6 @@ pub enum Error { /// Result for functions that may fail pub type Result = std::result::Result; -enum WasmPayload<'fetch> { - Hash(&'fetch Hash), - Code(&'fetch [u8]), -} - /// Execute a transaction code. Returns the set verifiers addresses requested by /// the transaction. 
#[allow(clippy::too_many_arguments)] @@ -112,19 +107,10 @@ where .get_section(tx.code_sechash()) .and_then(|x| Section::code_sec(x.as_ref())) .ok_or(Error::MissingCode)?; - let (tx_hash, code) = match tx_code.code { - Commitment::Hash(code_hash) => (code_hash, None), - Commitment::Id(tx_code) => (Hash::sha256(&tx_code), Some(tx_code)), - }; - - let code_or_hash = match code { - Some(ref code) => WasmPayload::Code(code), - None => WasmPayload::Hash(&tx_hash), - }; let (module, store) = fetch_or_compile( tx_wasm_cache, - code_or_hash, + &tx_code.code, write_log, storage, gas_meter, @@ -195,7 +181,7 @@ where /// that triggered the execution. #[allow(clippy::too_many_arguments)] pub fn vp( - vp_code_hash: &Hash, + vp_code_hash: Hash, tx: &Tx, tx_index: &TxIndex, address: &Address, @@ -205,7 +191,6 @@ pub fn vp( keys_changed: &BTreeSet, verifiers: &BTreeSet
, mut vp_wasm_cache: VpCache, - #[cfg(not(feature = "mainnet"))] has_valid_pow: bool, ) -> Result where DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, @@ -215,7 +200,7 @@ where // Compile the wasm module let (module, store) = fetch_or_compile( &mut vp_wasm_cache, - WasmPayload::Hash(vp_code_hash), + &Commitment::Hash(vp_code_hash), write_log, storage, gas_meter, @@ -243,8 +228,6 @@ where keys_changed, &eval_runner, &mut vp_wasm_cache, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, ); let initial_memory = @@ -254,7 +237,7 @@ where run_vp( module, imports, - vp_code_hash, + &vp_code_hash, tx, address, keys_changed, @@ -399,7 +382,7 @@ where // Compile the wasm module let (module, store) = fetch_or_compile( vp_wasm_cache, - WasmPayload::Hash(&vp_code_hash), + &Commitment::Hash(vp_code_hash), write_log, storage, gas_meter, @@ -455,7 +438,7 @@ pub fn prepare_wasm_code>(code: T) -> Result> { // loading and code compilation gas costs. fn fetch_or_compile( wasm_cache: &mut Cache, - code_or_hash: WasmPayload, + code_or_hash: &Commitment, write_log: &WriteLog, storage: &Storage, gas_meter: &mut dyn GasMetering, @@ -467,7 +450,7 @@ where CA: 'static + WasmCacheAccess, { match code_or_hash { - WasmPayload::Hash(code_hash) => { + Commitment::Hash(code_hash) => { let (module, store, tx_len) = match wasm_cache.fetch(code_hash)? 
{ Some((module, store)) => { // Gas accounting even if the compiled module is in cache @@ -541,7 +524,7 @@ where gas_meter.add_compiling_gas(tx_len)?; Ok((module, store)) } - WasmPayload::Code(code) => { + Commitment::Id(code) => { gas_meter.add_compiling_gas( u64::try_from(code.len()) .map_err(|e| Error::ConversionError(e.to_string()))?, @@ -752,7 +735,7 @@ mod tests { // When the `eval`ed VP doesn't run out of memory, it should return // `true` let passed = vp( - &code_hash, + code_hash, &outer_tx, &tx_index, &addr, @@ -762,8 +745,6 @@ mod tests { &keys_changed, &verifiers, vp_cache.clone(), - #[cfg(not(feature = "mainnet"))] - false, ) .unwrap(); assert!(passed); @@ -786,7 +767,7 @@ mod tests { // `false`, hence we should also get back `false` from the VP that // called `eval`. let passed = vp( - &code_hash, + code_hash, &outer_tx, &tx_index, &addr, @@ -796,8 +777,6 @@ mod tests { &keys_changed, &verifiers, vp_cache, - #[cfg(not(feature = "mainnet"))] - false, ) .unwrap(); @@ -840,7 +819,7 @@ mod tests { outer_tx.set_code(Code::new(vec![])); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( - &code_hash, + code_hash, &outer_tx, &tx_index, &addr, @@ -850,8 +829,6 @@ mod tests { &keys_changed, &verifiers, vp_cache.clone(), - #[cfg(not(feature = "mainnet"))] - false, ); assert!(result.is_ok(), "Expected success, got {:?}", result); @@ -862,7 +839,7 @@ mod tests { outer_tx.header.chain_id = storage.chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); let error = vp( - &code_hash, + code_hash, &outer_tx, &tx_index, &addr, @@ -872,8 +849,6 @@ mod tests { &keys_changed, &verifiers, vp_cache, - #[cfg(not(feature = "mainnet"))] - false, ) .expect_err("Expected to run out of memory"); @@ -976,7 +951,7 @@ mod tests { outer_tx.set_code(Code::new(vec![])); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( - &code_hash, + code_hash, &outer_tx, &tx_index, &addr, @@ -986,8 +961,6 @@ mod tests { 
&keys_changed, &verifiers, vp_cache, - #[cfg(not(feature = "mainnet"))] - false, ); // Depending on platform, we get a different error from the running out // of memory @@ -1104,7 +1077,7 @@ mod tests { outer_tx.set_code(Code::new(vec![])); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let error = vp( - &code_hash, + code_hash, &outer_tx, &tx_index, &addr, @@ -1114,8 +1087,6 @@ mod tests { &keys_changed, &verifiers, vp_cache, - #[cfg(not(feature = "mainnet"))] - false, ) .expect_err("Expected to run out of memory"); @@ -1183,7 +1154,7 @@ mod tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let passed = vp( - &code_hash, + code_hash, &outer_tx, &tx_index, &addr, @@ -1193,8 +1164,6 @@ mod tests { &keys_changed, &verifiers, vp_cache, - #[cfg(not(feature = "mainnet"))] - false, ) .unwrap(); assert!(!passed); @@ -1317,7 +1286,7 @@ mod tests { storage.write(&len_key, code_len).unwrap(); vp( - &code_hash, + code_hash, &outer_tx, &tx_index, &addr, @@ -1327,8 +1296,6 @@ mod tests { &keys_changed, &verifiers, vp_cache, - #[cfg(not(feature = "mainnet"))] - false, ) } diff --git a/test_fixtures/masp_proofs/0DAF8BDF2318129AC828A7149AC83E76506147445D4DC22D57CBC9869BCDDA80.bin b/test_fixtures/masp_proofs/0DAF8BDF2318129AC828A7149AC83E76506147445D4DC22D57CBC9869BCDDA80.bin new file mode 100644 index 00000000000..f20cf90f1a5 Binary files /dev/null and b/test_fixtures/masp_proofs/0DAF8BDF2318129AC828A7149AC83E76506147445D4DC22D57CBC9869BCDDA80.bin differ diff --git a/test_fixtures/masp_proofs/12C933751C24BDC39C9108F5AF5D4C1BF345378A4FB6BB0B179BA8BDB0D2A3C0.bin b/test_fixtures/masp_proofs/12C933751C24BDC39C9108F5AF5D4C1BF345378A4FB6BB0B179BA8BDB0D2A3C0.bin new file mode 100644 index 00000000000..e24feb560a8 Binary files /dev/null and b/test_fixtures/masp_proofs/12C933751C24BDC39C9108F5AF5D4C1BF345378A4FB6BB0B179BA8BDB0D2A3C0.bin differ diff --git 
a/test_fixtures/masp_proofs/5B99F3D7E0CE75AB1F4B737EC88B269A5436CD72AA758686960F409B04841707.bin b/test_fixtures/masp_proofs/5B99F3D7E0CE75AB1F4B737EC88B269A5436CD72AA758686960F409B04841707.bin new file mode 100644 index 00000000000..30b0e399a35 Binary files /dev/null and b/test_fixtures/masp_proofs/5B99F3D7E0CE75AB1F4B737EC88B269A5436CD72AA758686960F409B04841707.bin differ diff --git a/test_fixtures/masp_proofs/889C046FA76727BC97433503BB79BAC90BA1F01653EBCFDCF7CC8AAA1BBEE462.bin b/test_fixtures/masp_proofs/889C046FA76727BC97433503BB79BAC90BA1F01653EBCFDCF7CC8AAA1BBEE462.bin new file mode 100644 index 00000000000..525e1e63ee2 Binary files /dev/null and b/test_fixtures/masp_proofs/889C046FA76727BC97433503BB79BAC90BA1F01653EBCFDCF7CC8AAA1BBEE462.bin differ diff --git a/test_fixtures/masp_proofs/A9FA2730222946FA51E9D587544FDED28D5E7D3C6B52DCF38A5978CEA70D6FD3.bin b/test_fixtures/masp_proofs/A9FA2730222946FA51E9D587544FDED28D5E7D3C6B52DCF38A5978CEA70D6FD3.bin new file mode 100644 index 00000000000..634d326dcdc Binary files /dev/null and b/test_fixtures/masp_proofs/A9FA2730222946FA51E9D587544FDED28D5E7D3C6B52DCF38A5978CEA70D6FD3.bin differ diff --git a/test_fixtures/masp_proofs/AC308C08512AF5DAA364B845D146763B3CE0BACFB7799C6744E50B9E7F43E961.bin b/test_fixtures/masp_proofs/AC308C08512AF5DAA364B845D146763B3CE0BACFB7799C6744E50B9E7F43E961.bin new file mode 100644 index 00000000000..b67b3c8cd08 Binary files /dev/null and b/test_fixtures/masp_proofs/AC308C08512AF5DAA364B845D146763B3CE0BACFB7799C6744E50B9E7F43E961.bin differ diff --git a/test_fixtures/masp_proofs/BE57BA4D8FB068F5A933E78DEF2989556FD771D368849D034E22923FD350EEEC.bin b/test_fixtures/masp_proofs/BE57BA4D8FB068F5A933E78DEF2989556FD771D368849D034E22923FD350EEEC.bin new file mode 100644 index 00000000000..c05883cbab7 Binary files /dev/null and b/test_fixtures/masp_proofs/BE57BA4D8FB068F5A933E78DEF2989556FD771D368849D034E22923FD350EEEC.bin differ diff --git 
a/test_fixtures/masp_proofs/E76E54B7526CD2B5423322FB711C0CA6AA6520A2AC8BC34A84358EA137F138D0.bin b/test_fixtures/masp_proofs/E76E54B7526CD2B5423322FB711C0CA6AA6520A2AC8BC34A84358EA137F138D0.bin new file mode 100644 index 00000000000..4df2e0be9f2 Binary files /dev/null and b/test_fixtures/masp_proofs/E76E54B7526CD2B5423322FB711C0CA6AA6520A2AC8BC34A84358EA137F138D0.bin differ diff --git a/tests/src/e2e/helpers.rs b/tests/src/e2e/helpers.rs index e2d887b5a21..5e4edce6aa3 100644 --- a/tests/src/e2e/helpers.rs +++ b/tests/src/e2e/helpers.rs @@ -167,14 +167,9 @@ pub fn get_actor_rpc(test: &Test, who: &Who) -> String { }; let config = Config::load(base_dir, &test.net.chain_id, Some(tendermint_mode)); - let ip = convert_tm_addr_to_socket_addr(&config.ledger.cometbft.rpc.laddr) - .ip() - .to_string(); - let port = - convert_tm_addr_to_socket_addr(&config.ledger.cometbft.rpc.laddr) - .port() - .to_string(); - format!("{}:{}", ip, port) + let socket_addr = + convert_tm_addr_to_socket_addr(&config.ledger.cometbft.rpc.laddr); + format!("{}:{}", socket_addr.ip(), socket_addr.port()) } /// Get the public key of the validator diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index f2a6d20da30..2f5bbe4ea71 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -20,6 +20,7 @@ use borsh::BorshSerialize; use color_eyre::eyre::Result; use data_encoding::HEXLOWER; use namada::types::address::Address; +use namada::types::io::DefaultIo; use namada::types::storage::Epoch; use namada::types::token; use namada_apps::client::tx::CLIShieldedUtils; @@ -33,7 +34,7 @@ use namada_core::ledger::governance::cli::onchain::{ PgfFunding, PgfFundingTarget, StewardsUpdate, }; use namada_test_utils::TestWasms; -use namada_vp_prelude::{testnet_pow, BTreeSet}; +use namada_vp_prelude::BTreeSet; use serde_json::json; use setup::constants::*; use setup::Test; @@ -409,22 +410,7 @@ fn stop_ledger_at_height() -> Result<()> { /// 8. 
Query the raw bytes of a storage key #[test] fn ledger_txs_and_queries() -> Result<()> { - let test = setup::network( - |genesis| { - #[cfg(not(feature = "mainnet"))] - { - GenesisConfig { - faucet_pow_difficulty: testnet_pow::Difficulty::try_new(1), - ..genesis - } - } - #[cfg(feature = "mainnet")] - { - genesis - } - }, - None, - )?; + let test = setup::network(|genesis| genesis, None)?; set_ethereum_bridge_mode( &test, @@ -569,42 +555,6 @@ fn ledger_txs_and_queries() -> Result<()> { "--node", &validator_one_rpc, ], - // 6. Submit a tx to withdraw from faucet account (requires PoW challenge - // solution) - vec![ - "transfer", - "--source", - "faucet", - "--target", - ALBERT, - "--token", - NAM, - "--amount", - "10.1", - // Faucet withdrawal requires an explicit signer - "--signing-keys", - ALBERT_KEY, - "--node", - &validator_one_rpc, - ], - // 6. Submit a tx to withdraw from faucet account (requires PoW challenge - // solution) - vec![ - "transfer", - "--source", - "faucet", - "--target", - ALBERT, - "--token", - NAM, - "--amount", - "10.1", - // Faucet withdrawal requires an explicit signer - "--signing-keys", - ALBERT_KEY, - "--node", - &validator_one_rpc, - ], ]; for tx_args in &txs_args { @@ -738,7 +688,7 @@ fn ledger_txs_and_queries() -> Result<()> { #[test] fn masp_txs_and_queries() -> Result<()> { // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. @@ -882,11 +832,11 @@ fn masp_txs_and_queries() -> Result<()> { /// 1. Test that a tx requesting a disposable signer with a correct unshielding /// operation is succesful /// 2. 
Test that a tx requesting a disposable signer -/// providing an insufficient unshielding goes through the PoW +/// providing an insufficient unshielding fails #[test] fn wrapper_disposable_signer() -> Result<()> { // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. @@ -968,9 +918,10 @@ fn wrapper_disposable_signer() -> Result<()> { "--disposable-gas-payer", "--ledger-address", &validator_one_rpc, + "--force", ], // Not enough funds for fee payment, will use PoW - "Looking for a solution with difficulty", + "Error while processing transaction's fees", ), ]; diff --git a/tests/src/e2e/setup.rs b/tests/src/e2e/setup.rs index eecc60588f1..9a58230072b 100644 --- a/tests/src/e2e/setup.rs +++ b/tests/src/e2e/setup.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use std::ffi::OsStr; use std::fmt::Display; use std::fs::{File, OpenOptions}; -use std::net::SocketAddr; use std::path::{Path, PathBuf}; use std::process::Command; use std::str::FromStr; @@ -122,17 +121,17 @@ where let validator_0 = genesis.validator.get_mut("validator-0").unwrap(); // Clone the first validator before modifying it let other_validators = validator_0.clone(); - let net_address_0 = - SocketAddr::from_str(validator_0.net_address.as_ref().unwrap()) - .unwrap(); - let net_address_port_0 = net_address_0.port(); + let validator_0_target = validator_0.net_address.clone().unwrap(); + let split: Vec<&str> = validator_0_target.split(':').collect(); + let (net_target_0, net_address_port_0) = + (split[0], split[1].parse::().unwrap()); for ix in 0..num { let mut validator = other_validators.clone(); - let mut net_address = net_address_0; + let mut net_target = net_target_0.to_string(); // 6 ports for each validator let first_port = 
net_address_port_0 + port_offset(ix); - net_address.set_port(first_port); - validator.net_address = Some(net_address.to_string()); + net_target = format!("{}:{}", net_target, first_port); + validator.net_address = Some(net_target.to_string()); let name = format!("validator-{}", ix); genesis.validator.insert(name, validator); } diff --git a/tests/src/integration.rs b/tests/src/integration.rs index 8642e0e03c2..1a7c84dbfbd 100644 --- a/tests/src/integration.rs +++ b/tests/src/integration.rs @@ -1,3 +1,2 @@ mod masp; mod setup; -mod utils; diff --git a/tests/src/integration/masp.rs b/tests/src/integration/masp.rs index ac69f08a0dd..ecd1b34465d 100644 --- a/tests/src/integration/masp.rs +++ b/tests/src/integration/masp.rs @@ -2,9 +2,10 @@ use std::path::PathBuf; use color_eyre::eyre::Result; use color_eyre::owo_colors::OwoColorize; +use namada::types::io::DefaultIo; use namada_apps::client::tx::CLIShieldedUtils; use namada_apps::node::ledger::shell::testing::client::run; -use namada_apps::node::ledger::shell::testing::utils::Bin; +use namada_apps::node::ledger::shell::testing::utils::{Bin, CapturedOutput}; use namada_core::types::address::{btc, eth, masp_rewards}; use namada_core::types::token; use namada_core::types::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; @@ -16,7 +17,6 @@ use crate::e2e::setup::constants::{ AC_PAYMENT_ADDRESS, AC_VIEWING_KEY, ALBERT, A_SPENDING_KEY, BB_PAYMENT_ADDRESS, BERTHA, BTC, B_SPENDING_KEY, CHRISTEL, ETH, MASP, NAM, }; -use crate::integration::utils::CapturedOutput; /// In this test we verify that users of the MASP receive the correct rewards /// for leaving their assets in the pool for varying periods of time. @@ -29,7 +29,7 @@ fn masp_incentives() -> Result<()> { // This address doesn't matter for tests. But an argument is required. 
let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. @@ -765,7 +765,7 @@ fn masp_pinned_txs() -> Result<()> { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); let mut node = setup::setup()?; // Wait till epoch boundary @@ -928,7 +928,7 @@ fn masp_txs_and_queries() -> Result<()> { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); enum Response { Ok(&'static str), @@ -1230,12 +1230,11 @@ fn masp_txs_and_queries() -> Result<()> { /// 3. Submit a new wrapper with an invalid unshielding tx and assert the /// failure #[test] -#[should_panic(expected = "No faucet account found")] fn wrapper_fee_unshielding() { // This address doesn't matter for tests. But an argument is required. let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node - let _ = CLIShieldedUtils::new(PathBuf::new()); + let _ = CLIShieldedUtils::new::(PathBuf::new()); // Lengthen epoch to ensure that a transaction can be constructed and // submitted within the same block. Necessary to ensure that conversion is // not invalidated. 
@@ -1282,8 +1281,6 @@ fn wrapper_fee_unshielding() { NAM, "--amount", "1", - "--gas-price", - "30", "--gas-limit", "20000", "--gas-spending-key", @@ -1298,7 +1295,7 @@ fn wrapper_fee_unshielding() { // 3. Invalid unshielding // TODO: this test shall panic because of the panic in the sdk. Once the // panics are removed from there, this test can be updated - run( + let tx_run = run( &node, Bin::Client, vec![ @@ -1317,7 +1314,10 @@ fn wrapper_fee_unshielding() { B_SPENDING_KEY, "--ledger-address", validator_one_rpc, + "--force", ], ) - .unwrap(); + .is_err(); + + assert!(tx_run); } diff --git a/tests/src/integration/utils.rs b/tests/src/integration/utils.rs deleted file mode 100644 index f626a001ee8..00000000000 --- a/tests/src/integration/utils.rs +++ /dev/null @@ -1,83 +0,0 @@ -use std::fs::File; -use std::path::PathBuf; -use std::sync::Arc; - -struct TempFile(PathBuf); -impl TempFile { - fn new(path: PathBuf) -> (Self, File) { - let f = File::create(&path).unwrap(); - (Self(path), f) - } -} - -impl Drop for TempFile { - fn drop(&mut self) { - _ = std::fs::remove_file(&self.0); - } -} - -/// Test helper that captures stdout of -/// a process. -pub struct CapturedOutput { - pub output: String, - pub result: T, - input: String, -} - -impl CapturedOutput { - pub fn with_input(input: String) -> Self { - Self { - output: "".to_string(), - result: (), - input, - } - } -} - -impl CapturedOutput { - /// Run a client command and capture - /// the output to the mocked stdout. 
- pub(crate) fn of(func: F) -> Self - where - F: FnOnce() -> T, - { - std::io::set_output_capture(Some(Default::default())); - let mut capture = Self { - output: Default::default(), - result: func(), - input: Default::default(), - }; - let captured = std::io::set_output_capture(None); - let captured = captured.unwrap(); - let captured = Arc::try_unwrap(captured).unwrap(); - let captured = captured.into_inner().unwrap(); - capture.output = String::from_utf8(captured).unwrap(); - capture - } - - /// Run a client command with input to the mocked stdin and capture - /// the output to the mocked stdout - pub fn run(&self, func: F) -> CapturedOutput - where - F: FnOnce() -> U, - { - { - // write the input to the mocked stdin - let mut buf = namada_apps::cli::TESTIN.lock().unwrap(); - buf.clear(); - buf.extend_from_slice(self.input.as_bytes()); - } - CapturedOutput::of(func) - } - - /// Check if the captured output contains the regex. - pub fn matches(&self, needle: regex::Regex) -> bool { - needle.captures(&self.output).is_some() - } - - /// Check if the captured output contains the string. - pub fn contains(&self, needle: &str) -> bool { - let needle = regex::Regex::new(needle).unwrap(); - self.matches(needle) - } -} diff --git a/tests/src/lib.rs b/tests/src/lib.rs index 7e359474772..1d9958a30a3 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -1,4 +1,3 @@ -#![cfg_attr(test, feature(internal_output_capture))] //! Namada integrations and WASM tests and testing helpers. 
#![doc(html_favicon_url = "https://dev.namada.net/master/favicon.png")] @@ -13,7 +12,6 @@ pub use vm_host_env::{ibc, tx, vp}; #[cfg(test)] mod e2e; #[cfg(test)] -#[allow(dead_code)] mod integration; pub mod native_vp; pub mod storage; diff --git a/tests/src/native_vp/eth_bridge_pool.rs b/tests/src/native_vp/eth_bridge_pool.rs index 22584e1464c..364dcd074c6 100644 --- a/tests/src/native_vp/eth_bridge_pool.rs +++ b/tests/src/native_vp/eth_bridge_pool.rs @@ -77,10 +77,6 @@ mod test_bridge_pool_vp { address: EthAddress([42; 20]), version: Default::default(), }, - governance: UpgradeableContract { - address: EthAddress([18; 20]), - version: Default::default(), - }, }, }; // initialize Ethereum bridge storage diff --git a/tests/src/storage_api/mod.rs b/tests/src/storage_api/mod.rs index a03a21ebd0b..bc487bd59e1 100644 --- a/tests/src/storage_api/mod.rs +++ b/tests/src/storage_api/mod.rs @@ -1,2 +1 @@ mod collections; -mod testnet_pow; diff --git a/tests/src/storage_api/testnet_pow.rs b/tests/src/storage_api/testnet_pow.rs deleted file mode 100644 index ab45bc99c8d..00000000000 --- a/tests/src/storage_api/testnet_pow.rs +++ /dev/null @@ -1,99 +0,0 @@ -//! Tests for [`namada_core::ledger::testnet_pow`]. 
- -use namada_core::ledger::storage_api; -use namada_core::ledger::testnet_pow::*; -use namada_core::types::{address, token}; - -use crate::tx::{self, TestTxEnv}; -use crate::vp; - -#[test] -fn test_challenge_and_solution() -> storage_api::Result<()> { - let faucet_address = address::testing::established_address_1(); - let difficulty = Difficulty::try_new(1).unwrap(); - let withdrawal_limit = token::Amount::native_whole(1_000).into(); - - let mut tx_env = TestTxEnv::default(); - - // Source address that's using PoW (this would be derived from the tx - // wrapper pk) - let source = address::testing::established_address_2(); - - // Ensure that the addresses exists, so we can use them in a tx - tx_env.spawn_accounts([&faucet_address, &source]); - - init_faucet_storage( - &mut tx_env.wl_storage, - &faucet_address, - difficulty, - withdrawal_limit, - )?; - tx_env.commit_genesis(); - - let challenge = Challenge::new( - &mut tx_env.wl_storage, - &faucet_address, - source.clone(), - )?; - - let solution = challenge.solve(); - - // The solution must be valid - assert!(solution.verify_solution(source.clone())); - - // Changing the solution to `0` invalidates it - { - let solution = Solution { - value: 0, - ..solution.clone() - }; - // If you're unlucky and this fails, try changing the solution to - // a different literal. - assert!(!solution.verify_solution(source.clone())); - } - // Changing the counter invalidates it - { - let solution = Solution { - params: ChallengeParams { - difficulty: solution.params.difficulty, - counter: 10, - }, - ..solution - }; - // If you're unlucky and this fails, try changing the counter to - // a different literal. 
- assert!(!solution.verify_solution(source.clone())); - } - - // Apply the solution from a tx - vp::vp_host_env::init_from_tx(faucet_address.clone(), tx_env, |_addr| { - solution - .apply_from_tx(tx::ctx(), &faucet_address, &source) - .unwrap(); - }); - - // Check that it's valid - let is_valid = - solution.validate(&vp::ctx().pre(), &faucet_address, source.clone())?; - assert!(is_valid); - - // Commit the tx - let vp_env = vp::vp_host_env::take(); - tx::tx_host_env::set_from_vp_env(vp_env); - tx::tx_host_env::commit_tx_and_block(); - let tx_env = tx::tx_host_env::take(); - - // Re-apply the same solution from a tx - vp::vp_host_env::init_from_tx(faucet_address.clone(), tx_env, |_addr| { - solution - .apply_from_tx(tx::ctx(), &faucet_address, &source) - .unwrap(); - }); - - // Check that it's not longer valid - let is_valid = - solution.validate(&vp::ctx().pre(), &faucet_address, source)?; - assert!(!is_valid); - - Ok(()) -} diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index fe61570fb8b..27545c644e4 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs @@ -466,7 +466,7 @@ mod tests { let mut tx = Tx::new(chain_id, expiration); tx.add_code(code.clone()) .add_serialized_data(data.to_vec()) - .sign_raw(keypairs.clone(), pks_map.clone()) + .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); env.tx = tx; env.tx.clone() @@ -474,17 +474,18 @@ mod tests { assert_eq!(signed_tx_data.data().as_ref(), Some(data)); assert!( signed_tx_data - .verify_section_signatures( + .verify_signatures( &[ *signed_tx_data.data_sechash(), *signed_tx_data.code_sechash(), ], pks_map, + &None, 1, None, - &mut VpGasMeter::new_from_tx_meter( + Some(&mut VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()) - ) + )) ) .is_ok() ); @@ -492,7 +493,7 @@ mod tests { let other_keypair = key::testing::keypair_2(); assert!( signed_tx_data - .verify_section_signatures( + .verify_signatures( &[ 
*signed_tx_data.data_sechash(), *signed_tx_data.code_sechash(), @@ -500,11 +501,12 @@ mod tests { AccountPublicKeysMap::from_iter([ other_keypair.ref_to() ]), + &None, 1, None, - &mut VpGasMeter::new_from_tx_meter( + Some(&mut VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()) - ) + )) ) .is_err() ); @@ -567,7 +569,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(input_data.clone()) - .sign_raw(keypairs.clone(), pks_map.clone()) + .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); let result = vp::CTX.eval(empty_code, tx).unwrap(); assert!(!result); @@ -589,7 +591,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code_from_hash(code_hash) .add_serialized_data(input_data.clone()) - .sign_raw(keypairs.clone(), pks_map.clone()) + .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); let result = vp::CTX.eval(code_hash, tx).unwrap(); assert!(result); @@ -612,7 +614,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code_from_hash(code_hash) .add_serialized_data(input_data) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); let result = vp::CTX.eval(code_hash, tx).unwrap(); assert!(!result); @@ -637,7 +639,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs.clone(), pks_map.clone()) + .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); // create a client with the message @@ -671,7 +673,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // update the client with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -714,7 +716,7 @@ mod tests { let mut tx = 
Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs.clone(), pks_map.clone()) + .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); // init a connection with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -747,7 +749,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // open the connection with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -791,7 +793,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs.clone(), pks_map.clone()) + .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); // open try a connection with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -824,7 +826,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // open the connection with the mssage tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -870,7 +872,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs.clone(), pks_map.clone()) + .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); // init a channel with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -903,7 +905,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // open the channle with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -949,7 +951,7 @@ mod tests { let mut tx = 
Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs.clone(), pks_map.clone()) + .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); // try open a channel with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -983,7 +985,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // open a channel with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -1032,7 +1034,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // close the channel with the message let mut actions = tx_host_env::ibc::ibc_actions(tx::ctx()); @@ -1089,7 +1091,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // close the channel with the message @@ -1143,7 +1145,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs.clone(), pks_map.clone()) + .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); // send the token and a packet with the data tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -1190,7 +1192,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // ack the packet with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -1280,7 +1282,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) 
.add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // send the token and a packet with the data tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -1353,7 +1355,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // receive a packet with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -1432,7 +1434,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // Receive the packet, but no token is received tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -1536,7 +1538,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // receive a packet with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -1641,7 +1643,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // receive a packet with the message tx_host_env::ibc::ibc_actions(tx::ctx()) @@ -1741,7 +1743,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // timeout the packet @@ -1830,7 +1832,7 @@ mod tests { let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![]) .add_serialized_data(tx_data.clone()) - .sign_raw(keypairs, pks_map) + .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // timeout the packet diff --git 
a/tests/src/vm_host_env/vp.rs b/tests/src/vm_host_env/vp.rs index 7324fbd90ec..69d7af9bb1b 100644 --- a/tests/src/vm_host_env/vp.rs +++ b/tests/src/vm_host_env/vp.rs @@ -52,8 +52,6 @@ pub struct TestVpEnv { pub result_buffer: Option>, pub vp_wasm_cache: VpCache, pub vp_cache_dir: TempDir, - #[cfg(not(feature = "mainnet"))] - pub has_valid_pow: bool, } impl Default for TestVpEnv { @@ -87,8 +85,6 @@ impl Default for TestVpEnv { result_buffer: None, vp_wasm_cache, vp_cache_dir, - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, } } } @@ -270,8 +266,6 @@ mod native_vp_host_env { result_buffer, vp_wasm_cache, vp_cache_dir: _, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, }: &mut TestVpEnv| { let env = vm::host_env::testing::vp_env( @@ -287,8 +281,6 @@ mod native_vp_host_env { keys_changed, eval_runner, vp_wasm_cache, - #[cfg(not(feature = "mainnet"))] - *has_valid_pow, ); // Call the `host_env` function and unwrap any @@ -317,8 +309,6 @@ mod native_vp_host_env { result_buffer, vp_wasm_cache, vp_cache_dir: _, - #[cfg(not(feature = "mainnet"))] - has_valid_pow, }: &mut TestVpEnv| { let env = vm::host_env::testing::vp_env( @@ -334,8 +324,6 @@ mod native_vp_host_env { keys_changed, eval_runner, vp_wasm_cache, - #[cfg(not(feature = "mainnet"))] - *has_valid_pow, ); // Call the `host_env` function and unwrap any @@ -372,13 +360,14 @@ mod native_vp_host_env { input_data_ptr: u64, input_data_len: u64, ) -> i64); - native_host_fn!(vp_has_valid_pow() -> i64); native_host_fn!(vp_log_string(str_ptr: u64, str_len: u64)); native_host_fn!(vp_verify_tx_section_signature( hash_list_ptr: u64, hash_list_len: u64, public_keys_map_ptr: u64, public_keys_map_len: u64, + signer_ptr: u64, + signer_len: u64, threshold: u8, max_signatures_ptr: u64, max_signatures_len: u64,) diff --git a/vm_env/src/lib.rs b/vm_env/src/lib.rs index 099460258b8..9a540d5808f 100644 --- a/vm_env/src/lib.rs +++ b/vm_env/src/lib.rs @@ -202,6 +202,8 @@ pub mod vp { hash_list_len: u64, public_keys_map_ptr: 
u64, public_keys_map_len: u64, + signer_ptr: u64, + signer_len: u64, threshold: u8, max_signatures_ptr: u64, max_signatures_len: u64, @@ -216,8 +218,6 @@ pub mod vp { pub fn namada_vp_verify_masp(tx_ptr: u64, tx_len: u64) -> i64; - pub fn namada_vp_has_valid_pow() -> i64; - /// Charge the provided amount of gas for the current vp pub fn namada_vp_charge_gas(used_gas: u64); } diff --git a/vp_prelude/src/lib.rs b/vp_prelude/src/lib.rs index 32e29df46b7..09626283633 100644 --- a/vp_prelude/src/lib.rs +++ b/vp_prelude/src/lib.rs @@ -15,13 +15,13 @@ use std::marker::PhantomData; pub use borsh::{BorshDeserialize, BorshSerialize}; pub use namada_core::ledger::governance::storage as gov_storage; +pub use namada_core::ledger::parameters; pub use namada_core::ledger::pgf::storage as pgf_storage; pub use namada_core::ledger::storage_api::{ self, iter_prefix, iter_prefix_bytes, Error, OptionExt, ResultExt, StorageRead, }; pub use namada_core::ledger::vp_env::VpEnv; -pub use namada_core::ledger::{parameters, testnet_pow}; pub use namada_core::proto::{Section, Tx}; pub use namada_core::types::address::Address; use namada_core::types::chain::CHAIN_ID_LENGTH; @@ -94,6 +94,7 @@ pub fn verify_signatures(ctx: &Ctx, tx: &Tx, owner: &Address) -> VpResult { let max_signatures = max_signatures_per_transaction.try_to_vec().unwrap(); let public_keys_map = public_keys_index_map.try_to_vec().unwrap(); let targets = targets.try_to_vec().unwrap(); + let signer = owner.try_to_vec().unwrap(); let valid = unsafe { namada_vp_verify_tx_section_signature( @@ -101,6 +102,8 @@ pub fn verify_signatures(ctx: &Ctx, tx: &Tx, owner: &Address) -> VpResult { targets.len() as _, public_keys_map.as_ptr() as _, public_keys_map.len() as _, + signer.as_ptr() as _, + signer.len() as _, threshold, max_signatures.as_ptr() as _, max_signatures.len() as _, @@ -188,12 +191,6 @@ impl Ctx { pub fn post(&self) -> CtxPostStorageRead<'_> { CtxPostStorageRead { _ctx: self } } - - /// Check if the wrapper tx contained a valid 
testnet PoW - pub fn has_valid_pow(&self) -> bool { - let valid = unsafe { namada_vp_has_valid_pow() }; - HostEnvResult::is_success(valid) - } } /// Read access to the prior storage (state before tx execution) via diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 4edaef87c47..d547e7dc8b3 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -1677,8 +1677,8 @@ dependencies = [ [[package]] name = "ethbridge-bridge-contract" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" +version = "0.24.0" +source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.24.0#d66708bb8a734111988b9eaf08c7473bd7020c00" dependencies = [ "ethbridge-bridge-events", "ethbridge-structs", @@ -1688,30 +1688,8 @@ dependencies = [ [[package]] name = "ethbridge-bridge-events" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" -dependencies = [ - "ethabi", - "ethbridge-structs", - "ethers", - "ethers-contract", -] - -[[package]] -name = "ethbridge-governance-contract" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" -dependencies = [ - "ethbridge-governance-events", - "ethbridge-structs", - "ethers", - "ethers-contract", -] - -[[package]] -name = "ethbridge-governance-events" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" +version = "0.24.0" +source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.24.0#d66708bb8a734111988b9eaf08c7473bd7020c00" dependencies = [ "ethabi", "ethbridge-structs", @@ -1721,8 +1699,8 @@ dependencies = [ [[package]] name = "ethbridge-structs" -version = "0.23.0" -source = 
"git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" +version = "0.24.0" +source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.24.0#d66708bb8a734111988b9eaf08c7473bd7020c00" dependencies = [ "ethabi", "ethers", @@ -3344,7 +3322,6 @@ dependencies = [ "derivation-path", "derivative", "ethbridge-bridge-contract", - "ethbridge-governance-contract", "ethers", "eyre", "futures", diff --git a/wasm/checksums.json b/wasm/checksums.json index 9c50dd46dc8..193353b640f 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,23 +1,22 @@ { - "tx_bond.wasm": "tx_bond.8d1ddbeb397209c5efa22dd57fbdb31825d67c2942441cb2612583ec2593831a.wasm", - "tx_bridge_pool.wasm": "tx_bridge_pool.41ada308019a6227a495d996f5d3248e3f8052fcadf8779ee2b2e293aa73ccd0.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.137f2871848970aa9cf1d3a92a1e1a6e7a48b0537632d838bbb4e69fd301f8c3.wasm", - "tx_ibc.wasm": "tx_ibc.af007e03e8de1f8c34eb928fcfe91fd44b05c0183ca1149c5b262c8f62fcdd36.wasm", - "tx_init_account.wasm": "tx_init_account.d527ea17b417fca1a72d6a26abc34219630efcad4701e629a89e026e06ee06c1.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.e605bb96ff8b6ad1e10491a81590d15ed792f87b0382d1faee9966cb25a09028.wasm", - "tx_init_validator.wasm": "tx_init_validator.91ce97ff0bfa49ce9baa7585ae7e2c0514e91a66c625502b4aced635da5b021a.wasm", - "tx_resign_steward.wasm": "tx_resign_steward.a34a5737653182ecb75f6fcbac5b74ef849eb29b0072a20e90897acc14d8886e.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.8ab38f516ac799dcb96ba372cd5e5defd381ddf9d69579ce1556d1721d34f668.wasm", - "tx_transfer.wasm": "tx_transfer.738ac69d4a4f3dfb154aeed6b806ef1042b1a707de98bf8c6cc5ad66d478f6d9.wasm", - "tx_unbond.wasm": "tx_unbond.0c90a1f9a95b7171e0ebdca8318c19ba45f358158aa68370f630f84025635c8f.wasm", - "tx_unjail_validator.wasm": "tx_unjail_validator.310c196cb7b2d371bb74fe37ee1f2f7e233ead59477027891e4e28751b6bb3fe.wasm", 
- "tx_update_account.wasm": "tx_update_account.8f5934e4fcca4e7d3c58e1c0b8722ce0a948efa6b99e7801dd1c16f8ea22fb59.wasm", - "tx_update_steward_commission.wasm": "tx_update_steward_commission.3deda3d2d0fcce2e14c6a4d72931ea3a3713666c6eed5fd29a78e30d395b3cf5.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.33567afd0c59d5f2499a3cf4ebf3c254de1cae1d310d004b8e0e538f2fc8377a.wasm", - "tx_withdraw.wasm": "tx_withdraw.00e0a04e892eb67ac3c3c7a3162b12dc198506c1c466893923911c3ab471dc03.wasm", - "vp_implicit.wasm": "vp_implicit.0fc8114c5d87db4d818b4b38ded664c1ca1d3d42026e6b1328213c00e99f01eb.wasm", - "vp_masp.wasm": "vp_masp.8b01ab3c616342973fb30e37c8b9e4c83615f25b7cc6657f0360146266d36890.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.720d6808fa225cb3381ac0ff682e1c9136d8ef1193933ce30356ef2252fee001.wasm", - "vp_user.wasm": "vp_user.f93b90d5a0226c79159edd48f2801e7a12525751b937fda58525a8fc8b42d745.wasm", - "vp_validator.wasm": "vp_validator.3decad0fd761b928cdec3f89ed241fc218dd402be300f5edabf511677ae3d37d.wasm" + "tx_bond.wasm": "tx_bond.a0631898c43c82add1a5df8102c8a4e3967b2d6dae99f5d77d7cb7aff314a5e9.wasm", + "tx_bridge_pool.wasm": "tx_bridge_pool.6c1a3fb49a25adcfca928a3902a858547a57d9696366b674b9f0d8a327ea8a12.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.8dc88e362ad2dea3039c0451bc654e804e28cd8a3c6761237f7d016f621e942b.wasm", + "tx_ibc.wasm": "tx_ibc.0491c038523456c0f23bc9d5aa8d0257e64d6cc63bf6dd3bbac3f729feaf668a.wasm", + "tx_init_account.wasm": "tx_init_account.957be876dc7a865c7b0acff2368195ad4bc786a9beb14837e28f1789cab0eff3.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.161c79ec6fbd6262e80896c0901a629de7d5e2888d56b8025d8dfa5d06c97440.wasm", + "tx_init_validator.wasm": "tx_init_validator.2fd11c1339476a9a154dff5782182e49383641ed53b997638983f05d318ac69b.wasm", + "tx_resign_steward.wasm": "tx_resign_steward.78792e7977322f2dc163258169978039fd571bd0c1b726ff6f78759e9a5ebb06.wasm", + "tx_reveal_pk.wasm": 
"tx_reveal_pk.9eb149f4360fe7e37be5214a6cb01110757565a4c9b36de5bd7122cadab3c095.wasm", + "tx_transfer.wasm": "tx_transfer.7e9835463134a998d66071d44ac9580c8b70203ebe38641a72e4ad6e12fb70fc.wasm", + "tx_unbond.wasm": "tx_unbond.7fbae71401bc73610918457f0b52477706eaeaff86b83f8f0099882929a5884c.wasm", + "tx_unjail_validator.wasm": "tx_unjail_validator.ffdd901a188ba1bcedb9a1e34afa8b14ee99406c4599f8d1e76ccb02e9ab53ed.wasm", + "tx_update_account.wasm": "tx_update_account.a43bc2c2be95ab7c140f038a4c5889857e27cb6071a1357c9e6b314d6c9da7f9.wasm", + "tx_update_steward_commission.wasm": "tx_update_steward_commission.34b5b47690f95ce5d4591ca5470dbcd1fe1a9e7add85b665c23f3f7d810e824a.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.fa68528173e5ce09507b96825f59fe870485080d103c0387408d7a4443c18989.wasm", + "tx_withdraw.wasm": "tx_withdraw.d7512dfbe8eaad71bba1d98a9e2f3115777806006cb1a8ef29bf05337936c268.wasm", + "vp_implicit.wasm": "vp_implicit.ac88e968f6e0f9a50798425e45dc0c654abe9371234502d20670c8029e706b64.wasm", + "vp_masp.wasm": "vp_masp.f009a84a78edf4d6174bf82e649784bb121d064d0d577e75b63f3a5e3719a845.wasm", + "vp_user.wasm": "vp_user.69cc290c6824557e7cc233b4287e301d4a402ede3af66c186016a135bb423115.wasm", + "vp_validator.wasm": "vp_validator.c03262cfbfe57bc0c798aeb54257c493dbbf6f6fea7f4239215fed8bcc68a476.wasm" } \ No newline at end of file diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml index 67e7bbc913f..4682bc612ed 100644 --- a/wasm/wasm_source/Cargo.toml +++ b/wasm/wasm_source/Cargo.toml @@ -31,7 +31,6 @@ tx_update_steward_commission = ["namada_tx_prelude"] tx_resign_steward = ["namada_tx_prelude"] vp_implicit = ["namada_vp_prelude", "once_cell"] vp_masp = ["namada_vp_prelude", "masp_primitives"] -vp_testnet_faucet = ["namada_vp_prelude", "once_cell"] vp_token = ["namada_vp_prelude"] vp_user = ["namada_vp_prelude", "once_cell"] vp_validator = ["namada_vp_prelude", "once_cell"] diff --git a/wasm/wasm_source/Makefile b/wasm/wasm_source/Makefile index 
247062d6108..7b00424bafd 100644 --- a/wasm/wasm_source/Makefile +++ b/wasm/wasm_source/Makefile @@ -23,7 +23,6 @@ wasms += tx_update_steward_commission wasms += tx_resign_steward wasms += vp_implicit wasms += vp_masp -wasms += vp_testnet_faucet wasms += vp_user wasms += vp_validator diff --git a/wasm/wasm_source/src/lib.rs b/wasm/wasm_source/src/lib.rs index f4fd69cda3d..d376f8ca709 100644 --- a/wasm/wasm_source/src/lib.rs +++ b/wasm/wasm_source/src/lib.rs @@ -35,8 +35,6 @@ pub mod tx_withdraw; pub mod vp_implicit; #[cfg(feature = "vp_masp")] pub mod vp_masp; -#[cfg(feature = "vp_testnet_faucet")] -pub mod vp_testnet_faucet; #[cfg(feature = "vp_user")] pub mod vp_user; #[cfg(feature = "vp_validator")] diff --git a/wasm/wasm_source/src/vp_implicit.rs b/wasm/wasm_source/src/vp_implicit.rs index a67b40d357b..215dccf421d 100644 --- a/wasm/wasm_source/src/vp_implicit.rs +++ b/wasm/wasm_source/src/vp_implicit.rs @@ -187,7 +187,7 @@ fn validate_tx( mod tests { // Use this as `#[test]` annotation to enable logging use namada::ledger::pos::{GenesisValidator, PosParams}; - use namada::proto::{Code, Data, MultiSignature}; + use namada::proto::{Code, Data, Signature}; use namada::types::dec::Dec; use namada::types::storage::Epoch; use namada::types::transaction::TxType; @@ -535,10 +535,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[secret_key], - &pks_map, + pks_map.index_secret_keys(vec![secret_key]), + None, ))); let signed_tx = tx.clone(); @@ -671,10 +671,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[secret_key], - 
&pks_map, + pks_map.index_secret_keys(vec![secret_key]), + None, ))); let signed_tx = tx.clone(); @@ -839,10 +839,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[secret_key], - &pks_map, + pks_map.index_secret_keys(vec![secret_key]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -932,10 +932,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[secret_key], - &pks_map, + pks_map.index_secret_keys(vec![secret_key]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -987,10 +987,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[secret_key], - &pks_map, + pks_map.index_secret_keys(vec![secret_key]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); diff --git a/wasm/wasm_source/src/vp_testnet_faucet.rs b/wasm/wasm_source/src/vp_testnet_faucet.rs deleted file mode 100644 index 44b5adfb06e..00000000000 --- a/wasm/wasm_source/src/vp_testnet_faucet.rs +++ /dev/null @@ -1,466 +0,0 @@ -//! A "faucet" account for testnet. -//! -//! This VP allows anyone to withdraw up to -//! [`testnet_pow::read_withdrawal_limit`] tokens without the faucet's -//! signature, but with a valid PoW challenge solution that cannot be replayed. -//! -//! Any other storage key changes are allowed only with a valid signature. 
- -use namada_vp_prelude::*; -use once_cell::unsync::Lazy; - -#[validity_predicate(gas = 0)] -fn validate_tx( - ctx: &Ctx, - tx_data: Tx, - addr: Address, - keys_changed: BTreeSet, - verifiers: BTreeSet
, -) -> VpResult { - debug_log!( - "vp_testnet_faucet called with user addr: {}, key_changed: {:?}, \ - verifiers: {:?}", - addr, - keys_changed, - verifiers - ); - - let valid_sig = Lazy::new(|| { - matches!(verify_signatures(ctx, &tx_data, &addr), Ok(true)) - }); - - if !is_valid_tx(ctx, &tx_data)? { - return reject(); - } - - for key in keys_changed.iter() { - let is_valid = if let Some([token, owner]) = - token::is_any_token_balance_key(key) - { - if owner == &addr { - let pre: token::Amount = ctx.read_pre(key)?.unwrap_or_default(); - let post: token::Amount = - ctx.read_post(key)?.unwrap_or_default(); - let change = post.change() - pre.change(); - let maybe_denom = - storage_api::token::read_denom(&ctx.pre(), token)?; - if maybe_denom.is_none() { - debug_log!( - "A denomination for token address {} does not exist \ - in storage", - token, - ); - return reject(); - } - let denom = maybe_denom.unwrap(); - if !change.non_negative() { - // Allow to withdraw without a sig if there's a valid PoW - if ctx.has_valid_pow() { - let max_free_debit = - testnet_pow::read_withdrawal_limit( - &ctx.pre(), - &addr, - )?; - - token::Amount::from_uint(change.abs(), 0).unwrap() - <= token::Amount::from_uint(max_free_debit, denom) - .unwrap() - } else { - debug_log!("No PoW solution, a signature is required"); - // Debit without a solution has to signed - *valid_sig - } - } else { - // credit is permissive - true - } - } else { - // balance changes of other accounts - true - } - } else if let Some(owner) = key.is_validity_predicate() { - let has_post: bool = ctx.has_key_post(key)?; - if owner == &addr { - if has_post { - let vp_hash: Vec = ctx.read_bytes_post(key)?.unwrap(); - return Ok(*valid_sig && is_vp_whitelisted(ctx, &vp_hash)?); - } else { - return reject(); - } - } else { - let vp_hash: Vec = ctx.read_bytes_post(key)?.unwrap(); - return is_vp_whitelisted(ctx, &vp_hash); - } - } else { - // Allow any other key change if authorized by a signature - *valid_sig - }; - - if 
!is_valid { - debug_log!("key {} modification failed vp", key); - return reject(); - } - } - - accept() -} - -#[cfg(test)] -mod tests { - use address::testing::arb_non_internal_address; - use namada::proto::{Code, Data, MultiSignature, Signature}; - use namada::types::transaction::TxType; - use namada_test_utils::TestWasms; - // Use this as `#[test]` annotation to enable logging - use namada_tests::log::test; - use namada_tests::tx::{self, tx_host_env, TestTxEnv}; - use namada_tests::vp::vp_host_env::storage::Key; - use namada_tests::vp::*; - use namada_tx_prelude::{StorageWrite, TxEnv}; - use namada_vp_prelude::account::AccountPublicKeysMap; - use namada_vp_prelude::key::RefTo; - use proptest::prelude::*; - use storage::testing::arb_account_storage_key_no_vp; - - use super::*; - - /// Allows anyone to withdraw up to 1_000 tokens in a single tx - pub const MAX_FREE_DEBIT: i128 = 1_000_000_000; // in micro units - - /// Test that no-op transaction (i.e. no storage modifications) accepted. - #[test] - fn test_no_op_transaction() { - let mut tx_data = Tx::from_type(TxType::Raw); - tx_data.set_data(Data::new(vec![])); - let addr: Address = address::testing::established_address_1(); - let keys_changed: BTreeSet = BTreeSet::default(); - let verifiers: BTreeSet
= BTreeSet::default(); - - // The VP env must be initialized before calling `validate_tx` - vp_host_env::init(); - - assert!( - validate_tx(&CTX, tx_data, addr, keys_changed, verifiers).unwrap() - ); - } - - /// Test that a credit transfer is accepted. - #[test] - fn test_credit_transfer_accepted() { - // Initialize a tx environment - let mut tx_env = TestTxEnv::default(); - - let vp_owner = address::testing::established_address_1(); - let source = address::testing::established_address_2(); - let token = address::nam(); - let amount = token::Amount::from_uint(10_098_123, 0).unwrap(); - - // Spawn the accounts to be able to modify their storage - tx_env.spawn_accounts([&vp_owner, &source, &token]); - - // Credit the tokens to the source before running the transaction to be - // able to transfer from it - tx_env.credit_tokens(&source, &token, amount); - - let amount = token::DenominatedAmount { - amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; - - // Initialize VP environment from a transaction - vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { - // Apply transfer in a transaction - tx_host_env::token::transfer( - tx_host_env::ctx(), - &source, - address, - &token, - amount, - &None, - &None, - &None, - ) - .unwrap(); - }); - - let vp_env = vp_host_env::take(); - let mut tx_data = Tx::from_type(TxType::Raw); - tx_data.set_data(Data::new(vec![])); - let keys_changed: BTreeSet = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet
= BTreeSet::default(); - vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() - ); - } - - /// Test that a validity predicate update without a valid signature is - /// rejected. - #[test] - fn test_unsigned_vp_update_rejected() { - // Initialize a tx environment - let mut tx_env = TestTxEnv::default(); - - let vp_owner = address::testing::established_address_1(); - let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); - let vp_hash = sha256(&vp_code); - // for the update - tx_env.store_wasm_code(vp_code); - - // Spawn the accounts to be able to modify their storage - tx_env.spawn_accounts([&vp_owner]); - - // Initialize VP environment from a transaction - vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { - // Update VP in a transaction - tx::ctx() - .update_validity_predicate(address, vp_hash) - .unwrap(); - }); - - let vp_env = vp_host_env::take(); - let mut tx_data = Tx::from_type(TxType::Raw); - tx_data.set_data(Data::new(vec![])); - let keys_changed: BTreeSet = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet
= BTreeSet::default(); - vp_host_env::set(vp_env); - assert!( - !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() - ); - } - - /// Test that a validity predicate update with a valid signature is - /// accepted. - #[test] - fn test_signed_vp_update_accepted() { - // Initialize a tx environment - let mut tx_env = TestTxEnv::default(); - - let vp_owner = address::testing::established_address_1(); - let keypair = key::testing::keypair_1(); - let public_key = &keypair.ref_to(); - let vp_code = TestWasms::VpAlwaysTrue.read_bytes(); - let vp_hash = sha256(&vp_code); - // for the update - tx_env.store_wasm_code(vp_code); - - // Spawn the accounts to be able to modify their storage - tx_env.spawn_accounts([&vp_owner]); - tx_env.init_account_storage(&vp_owner, vec![public_key.clone()], 1); - - // Initialize VP environment from a transaction - vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { - // Update VP in a transaction - tx::ctx() - .update_validity_predicate(address, vp_hash) - .unwrap(); - }); - - let pks_map = AccountPublicKeysMap::from_iter(vec![public_key.clone()]); - - let mut vp_env = vp_host_env::take(); - let mut tx = vp_env.tx.clone(); - tx.set_data(Data::new(vec![])); - tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, - ))); - let signed_tx = tx.clone(); - vp_env.tx = signed_tx.clone(); - let keys_changed: BTreeSet = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet
= BTreeSet::default(); - vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() - ); - } - - prop_compose! { - /// Generates an account address and a storage key inside its storage. - fn arb_account_storage_subspace_key() - // Generate an address - (address in arb_non_internal_address()) - // Generate a storage key other than its VP key (VP cannot be - // modified directly via `write`, it has to be modified via - // `tx::update_validity_predicate`. - (storage_key in arb_account_storage_key_no_vp(address.clone()), - // Use the generated address too - address in Just(address)) - -> (Address, Key) { - (address, storage_key) - } - } - - proptest! { - /// Test that a debit of more than [`MAX_FREE_DEBIT`] tokens without a valid signature is rejected. - #[test] - fn test_unsigned_debit_over_limit_rejected(amount in (MAX_FREE_DEBIT as u64 + 1..)) { - // Initialize a tx environment - let mut tx_env = TestTxEnv::default(); - - // Init the VP - let vp_owner = address::testing::established_address_1(); - let difficulty = testnet_pow::Difficulty::try_new(0).unwrap(); - let withdrawal_limit = token::Amount::from_uint(MAX_FREE_DEBIT as u64, 0).unwrap(); - testnet_pow::init_faucet_storage(&mut tx_env.wl_storage, &vp_owner, difficulty, withdrawal_limit.into()).unwrap(); - - let target = address::testing::established_address_2(); - let token = address::nam(); - let amount = token::Amount::from_uint(amount, 0).unwrap(); - - // Spawn the accounts to be able to modify their storage - tx_env.spawn_accounts([&vp_owner, &target, &token]); - - // Credit the tokens to the VP owner before running the transaction to - // be able to transfer from it - tx_env.credit_tokens(&vp_owner, &token, amount); - tx_env.commit_genesis(); - let amount = token::DenominatedAmount { - amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into() - }; - - // Initialize VP environment from a transaction - vp_host_env::init_from_tx(vp_owner.clone(), tx_env, 
|address| { - // Apply transfer in a transaction - tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount, &None, &None, &None).unwrap(); - }); - - let vp_env = vp_host_env::take(); - let mut tx_data = Tx::from_type(TxType::Raw); - tx_data.set_data(Data::new(vec![])); - let keys_changed: BTreeSet = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet
= BTreeSet::default(); - vp_host_env::set(vp_env); - assert!(!validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers).unwrap()); - } - - /// Test that a debit of less than or equal to [`MAX_FREE_DEBIT`] tokens - /// without a valid signature but with a valid PoW solution is accepted. - #[test] - fn test_unsigned_debit_under_limit_accepted(amount in (..MAX_FREE_DEBIT as u64 + 1)) { - // Initialize a tx environment - let mut tx_env = TestTxEnv::default(); - - // Init the VP - let vp_owner = address::testing::established_address_1(); - let difficulty = testnet_pow::Difficulty::try_new(0).unwrap(); - let withdrawal_limit = token::Amount::from_uint(MAX_FREE_DEBIT as u64, 0).unwrap(); - testnet_pow::init_faucet_storage(&mut tx_env.wl_storage, &vp_owner, difficulty, withdrawal_limit.into()).unwrap(); - - let target = address::testing::established_address_2(); - let target_key = key::testing::keypair_1(); - let _public_key = target_key.ref_to(); - let token = address::nam(); - let amount = token::Amount::from_uint(amount, 0).unwrap(); - - // Spawn the accounts to be able to modify their storage - tx_env.spawn_accounts([&vp_owner, &target, &token]); - - // Credit the tokens to the VP owner before running the transaction to - // be able to transfer from it - tx_env.credit_tokens(&vp_owner, &token, amount); - // write the denomination of NAM into storage - storage_api::token::write_denom(&mut tx_env.wl_storage, &token, token::NATIVE_MAX_DECIMAL_PLACES.into()).unwrap(); - tx_env.commit_genesis(); - - // Construct a PoW solution like a client would - let challenge = testnet_pow::Challenge::new(&mut tx_env.wl_storage, &vp_owner, target.clone()).unwrap(); - let solution = challenge.solve(); - let solution_bytes = solution.try_to_vec().unwrap(); - - let amount = token::DenominatedAmount { - amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; - - // Initialize VP environment from a transaction - vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { - // 
Don't call `Solution::invalidate_if_valid` - this is done by the - // shell's finalize_block. - let valid = solution.validate(tx::ctx(), address, target.clone()).unwrap(); - assert!(valid); - // Apply transfer in a transaction - tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount, &None, &None, &None).unwrap(); - }); - - let mut vp_env = vp_host_env::take(); - // This is set by the protocol when the wrapper tx has a valid PoW - vp_env.has_valid_pow = true; - let mut tx_data = Tx::from_type(TxType::Raw); - tx_data.set_data(Data::new(solution_bytes)); - tx_data.set_code(Code::new(vec![])); - tx_data.add_section(Section::Signature(Signature::new(vec![*tx_data.data_sechash(), *tx_data.code_sechash()], &target_key))); - let keys_changed: BTreeSet = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet
= BTreeSet::default(); - vp_host_env::set(vp_env); - assert!(validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers).unwrap()); - } - - /// Test that a signed tx that performs arbitrary storage writes or - /// deletes to the account is accepted. - #[test] - fn test_signed_arb_storage_write( - (vp_owner, storage_key) in arb_account_storage_subspace_key(), - // Generate bytes to write. If `None`, delete from the key instead - storage_value in any::>>(), - ) { - // Initialize a tx environment - let mut tx_env = TestTxEnv::default(); - - // Init the VP - let difficulty = testnet_pow::Difficulty::try_new(0).unwrap(); - let withdrawal_limit = token::Amount::from_uint(MAX_FREE_DEBIT as u64, 0).unwrap(); - testnet_pow::init_faucet_storage(&mut tx_env.wl_storage, &vp_owner, difficulty, withdrawal_limit.into()).unwrap(); - - let keypair = key::testing::keypair_1(); - let public_key = &keypair.ref_to(); - - // Spawn all the accounts in the storage key to be able to modify - // their storage - let storage_key_addresses = storage_key.find_addresses(); - tx_env.spawn_accounts(storage_key_addresses); - - tx_env.init_account_storage(&vp_owner, vec![public_key.clone()], 1); - - // Initialize VP environment from a transaction - vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |_address| { - // Write or delete some data in the transaction - if let Some(value) = &storage_value { - tx::ctx().write(&storage_key, value).unwrap(); - } else { - tx::ctx().delete(&storage_key).unwrap(); - } - }); - - let pks_map = AccountPublicKeysMap::from_iter(vec![public_key.clone()]); - - let mut vp_env = vp_host_env::take(); - let mut tx = vp_env.tx.clone(); - tx.set_data(Data::new(vec![])); - tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( - vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, - ))); - let signed_tx = tx.clone(); - vp_env.tx = signed_tx.clone(); - let keys_changed: BTreeSet = - 
vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet
= BTreeSet::default(); - vp_host_env::set(vp_env); - assert!(validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers).unwrap()); - } - } -} diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index 5f0b70b506f..a334576b530 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -185,7 +185,7 @@ fn validate_tx( mod tests { use address::testing::arb_non_internal_address; use namada::ledger::pos::{GenesisValidator, PosParams}; - use namada::proto::{Code, Data, MultiSignature}; + use namada::proto::{Code, Data, Signature}; use namada::types::dec::Dec; use namada::types::storage::Epoch; use namada::types::transaction::TxType; @@ -392,10 +392,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -561,10 +561,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[secret_key], - &pks_map, + pks_map.index_secret_keys(vec![secret_key]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -723,7 +723,11 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new(vec![*tx.data_sechash(), *tx.code_sechash()], &[keypair], &pks_map))); + tx.add_section(Section::Signature(Signature::new( + vec![*tx.data_sechash(), *tx.code_sechash()], + pks_map.index_secret_keys(vec![keypair]), + None, + ))); let 
signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); let keys_changed: BTreeSet = @@ -806,10 +810,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -861,10 +865,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -917,10 +921,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -973,10 +977,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -1029,10 +1033,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); - 
tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); diff --git a/wasm/wasm_source/src/vp_validator.rs b/wasm/wasm_source/src/vp_validator.rs index f1418e38e11..f929a8a0d1e 100644 --- a/wasm/wasm_source/src/vp_validator.rs +++ b/wasm/wasm_source/src/vp_validator.rs @@ -192,7 +192,7 @@ fn validate_tx( mod tests { use address::testing::arb_non_internal_address; use namada::ledger::pos::{GenesisValidator, PosParams}; - use namada::proto::{Code, Data, MultiSignature}; + use namada::proto::{Code, Data, Signature}; use namada::types::dec::Dec; use namada::types::storage::Epoch; use namada::types::transaction::TxType; @@ -399,10 +399,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -579,10 +579,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[secret_key], - &pks_map, + pks_map.index_secret_keys(vec![secret_key]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -741,7 +741,11 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new(vec![*tx.data_sechash(), *tx.code_sechash()], &[keypair], 
&pks_map))); + tx.add_section(Section::Signature(Signature::new( + vec![*tx.data_sechash(), *tx.code_sechash()], + pks_map.index_secret_keys(vec![keypair]), + None, + ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); let keys_changed: BTreeSet = @@ -823,10 +827,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -878,10 +882,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -934,10 +938,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ -990,10 +994,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); @@ 
-1046,10 +1050,10 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_code(Code::new(vec![])); tx.set_data(Data::new(vec![])); - tx.add_section(Section::SectionSignature(MultiSignature::new( + tx.add_section(Section::Signature(Signature::new( vec![*tx.data_sechash(), *tx.code_sechash()], - &[keypair], - &pks_map, + pks_map.index_secret_keys(vec![keypair]), + None, ))); let signed_tx = tx.clone(); vp_env.tx = signed_tx.clone(); diff --git a/wasm_for_tests/tx_memory_limit.wasm b/wasm_for_tests/tx_memory_limit.wasm index 94ed2403786..a71fe71603a 100755 Binary files a/wasm_for_tests/tx_memory_limit.wasm and b/wasm_for_tests/tx_memory_limit.wasm differ diff --git a/wasm_for_tests/tx_mint_tokens.wasm b/wasm_for_tests/tx_mint_tokens.wasm index 68c67235796..753a67c6d89 100755 Binary files a/wasm_for_tests/tx_mint_tokens.wasm and b/wasm_for_tests/tx_mint_tokens.wasm differ diff --git a/wasm_for_tests/tx_no_op.wasm b/wasm_for_tests/tx_no_op.wasm index 0d7b20da7f0..224a9b5e408 100755 Binary files a/wasm_for_tests/tx_no_op.wasm and b/wasm_for_tests/tx_no_op.wasm differ diff --git a/wasm_for_tests/tx_proposal_code.wasm b/wasm_for_tests/tx_proposal_code.wasm index b2cfea8633d..5e4f6f90904 100755 Binary files a/wasm_for_tests/tx_proposal_code.wasm and b/wasm_for_tests/tx_proposal_code.wasm differ diff --git a/wasm_for_tests/tx_read_storage_key.wasm b/wasm_for_tests/tx_read_storage_key.wasm index fb483a0b846..bf27a5ad55d 100755 Binary files a/wasm_for_tests/tx_read_storage_key.wasm and b/wasm_for_tests/tx_read_storage_key.wasm differ diff --git a/wasm_for_tests/tx_write.wasm b/wasm_for_tests/tx_write.wasm index 39b20ef813c..0ad94a16e72 100755 Binary files a/wasm_for_tests/tx_write.wasm and b/wasm_for_tests/tx_write.wasm differ diff --git a/wasm_for_tests/vp_always_false.wasm b/wasm_for_tests/vp_always_false.wasm index 003fc115be9..8e1b1f1880a 100755 Binary files a/wasm_for_tests/vp_always_false.wasm and b/wasm_for_tests/vp_always_false.wasm differ diff --git 
a/wasm_for_tests/vp_always_true.wasm b/wasm_for_tests/vp_always_true.wasm index a18626d71ef..0d95172999d 100755 Binary files a/wasm_for_tests/vp_always_true.wasm and b/wasm_for_tests/vp_always_true.wasm differ diff --git a/wasm_for_tests/vp_eval.wasm b/wasm_for_tests/vp_eval.wasm index 98fcac06a28..b66b600d827 100755 Binary files a/wasm_for_tests/vp_eval.wasm and b/wasm_for_tests/vp_eval.wasm differ diff --git a/wasm_for_tests/vp_memory_limit.wasm b/wasm_for_tests/vp_memory_limit.wasm index 33b7c719f3e..a2adf75a672 100755 Binary files a/wasm_for_tests/vp_memory_limit.wasm and b/wasm_for_tests/vp_memory_limit.wasm differ diff --git a/wasm_for_tests/vp_read_storage_key.wasm b/wasm_for_tests/vp_read_storage_key.wasm index 9867e710095..33625363f9a 100755 Binary files a/wasm_for_tests/vp_read_storage_key.wasm and b/wasm_for_tests/vp_read_storage_key.wasm differ diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 2c4f12d24c7..7269fdb27ec 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -1677,8 +1677,8 @@ dependencies = [ [[package]] name = "ethbridge-bridge-contract" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" +version = "0.24.0" +source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.24.0#d66708bb8a734111988b9eaf08c7473bd7020c00" dependencies = [ "ethbridge-bridge-events", "ethbridge-structs", @@ -1688,30 +1688,8 @@ dependencies = [ [[package]] name = "ethbridge-bridge-events" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" -dependencies = [ - "ethabi", - "ethbridge-structs", - "ethers", - "ethers-contract", -] - -[[package]] -name = "ethbridge-governance-contract" -version = "0.23.0" -source = 
"git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" -dependencies = [ - "ethbridge-governance-events", - "ethbridge-structs", - "ethers", - "ethers-contract", -] - -[[package]] -name = "ethbridge-governance-events" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" +version = "0.24.0" +source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.24.0#d66708bb8a734111988b9eaf08c7473bd7020c00" dependencies = [ "ethabi", "ethbridge-structs", @@ -1721,8 +1699,8 @@ dependencies = [ [[package]] name = "ethbridge-structs" -version = "0.23.0" -source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.23.0#1bb96e06cbc3889aa46a01e3768bf25f0c78168a" +version = "0.24.0" +source = "git+https://github.com/heliaxdev/ethbridge-rs?tag=v0.24.0#d66708bb8a734111988b9eaf08c7473bd7020c00" dependencies = [ "ethabi", "ethers", @@ -3344,7 +3322,6 @@ dependencies = [ "derivation-path", "derivative", "ethbridge-bridge-contract", - "ethbridge-governance-contract", "ethers", "eyre", "futures",