diff --git a/.changelog/v0.10.1/bug-fixes/797-fix-shielded-to-shielded.md b/.changelog/v0.10.1/bug-fixes/797-fix-shielded-to-shielded.md new file mode 100644 index 00000000000..4cb3c35949b --- /dev/null +++ b/.changelog/v0.10.1/bug-fixes/797-fix-shielded-to-shielded.md @@ -0,0 +1,2 @@ +- Avoid reading from nonexistent storage keys in shielded-to-shielded transfers. + ([#797](https://github.com/anoma/namada/pull/797)) \ No newline at end of file diff --git a/.changelog/v0.11.0/bug-fixes/754-fix-abcipp.md b/.changelog/v0.11.0/bug-fixes/754-fix-abcipp.md new file mode 100644 index 00000000000..ca804196404 --- /dev/null +++ b/.changelog/v0.11.0/bug-fixes/754-fix-abcipp.md @@ -0,0 +1,2 @@ +- Fix building with the `abcipp` feature again + ([#754](https://github.com/anoma/namada/pull/754)) \ No newline at end of file diff --git a/.changelog/v0.11.0/bug-fixes/763-init-validator-vp-validation.md b/.changelog/v0.11.0/bug-fixes/763-init-validator-vp-validation.md new file mode 100644 index 00000000000..19769dbdfaa --- /dev/null +++ b/.changelog/v0.11.0/bug-fixes/763-init-validator-vp-validation.md @@ -0,0 +1,2 @@ +- Fixed validation of a validator initialization transaction. + ([#763](https://github.com/anoma/namada/pull/763)) \ No newline at end of file diff --git a/.changelog/v0.11.0/features/582-native-token-param.md b/.changelog/v0.11.0/features/582-native-token-param.md new file mode 100644 index 00000000000..10dbb27503f --- /dev/null +++ b/.changelog/v0.11.0/features/582-native-token-param.md @@ -0,0 +1,2 @@ +- Allow to set the native token via genesis configuration. 
+ ([#582](https://github.com/anoma/namada/pull/582)) \ No newline at end of file diff --git a/.changelog/v0.11.0/features/592-implicit-vp.md b/.changelog/v0.11.0/features/592-implicit-vp.md new file mode 100644 index 00000000000..ab93e1fc0f2 --- /dev/null +++ b/.changelog/v0.11.0/features/592-implicit-vp.md @@ -0,0 +1,6 @@ +- Added a validity predicate for implicit accounts. This is set in + protocol parameters and may be changed via governance. Additionally, + added automatic public key reveal in the client that use an implicit + account that hasn't revealed its PK yet as a source. It's also + possible to manually submit reveal transaction with client command + ([#592](https://github.com/anoma/namada/pull/592)) \ No newline at end of file diff --git a/.changelog/v0.11.0/features/687-remove-staking-address.md b/.changelog/v0.11.0/features/687-remove-staking-address.md new file mode 100644 index 00000000000..39d4def2aab --- /dev/null +++ b/.changelog/v0.11.0/features/687-remove-staking-address.md @@ -0,0 +1,2 @@ +- PoS: Removed staking reward addresses in preparation of auto-staked rewards + system. ([#687](https://github.com/anoma/namada/pull/687)) \ No newline at end of file diff --git a/.changelog/v0.11.0/features/695-validator-commission-rates.md b/.changelog/v0.11.0/features/695-validator-commission-rates.md new file mode 100644 index 00000000000..086227b5954 --- /dev/null +++ b/.changelog/v0.11.0/features/695-validator-commission-rates.md @@ -0,0 +1,4 @@ +- Allow to set validator's commission rates and a limit on change of commission + rate per epoch. Commission rate can be changed via a transaction authorized + by the validator, but the limit is immutable value, set when the validator's + account is initialized. 
([#695](https://github.com/anoma/namada/pull/695)) \ No newline at end of file diff --git a/.changelog/v0.11.0/features/707-refactor-voting-powers.md b/.changelog/v0.11.0/features/707-refactor-voting-powers.md new file mode 100644 index 00000000000..76c26cab673 --- /dev/null +++ b/.changelog/v0.11.0/features/707-refactor-voting-powers.md @@ -0,0 +1,5 @@ +- Optimize the PoS code to depend only on bonded stake, removing + the VotingPower(Delta) structs. This mitigates some previous + information loss in PoS calculations. Instead, the notion of + voting power is only relevant when communicating with Tendermint. + ([#707](https://github.com/anoma/namada/pull/707)) \ No newline at end of file diff --git a/.changelog/v0.11.0/features/708-update-pos-params.md b/.changelog/v0.11.0/features/708-update-pos-params.md new file mode 100644 index 00000000000..2941c5fc4e3 --- /dev/null +++ b/.changelog/v0.11.0/features/708-update-pos-params.md @@ -0,0 +1,4 @@ +- Update the set of parameters in the PoS system according to the + latest spec and standardizes the use of the rust_decimal crate + for parameters and calculations that require fractional numbers. + ([#708](https://github.com/anoma/namada/pull/708)) \ No newline at end of file diff --git a/.changelog/v0.11.0/features/775-rename-cli-fee-args.md b/.changelog/v0.11.0/features/775-rename-cli-fee-args.md new file mode 100644 index 00000000000..a81f75ef418 --- /dev/null +++ b/.changelog/v0.11.0/features/775-rename-cli-fee-args.md @@ -0,0 +1,2 @@ +- Renamed transaction CLI arguments `--fee-amount` and `--fee-token` to `--gas- + amount` and `--gas-token`. 
([#775](https://github.com/anoma/namada/pull/775)) diff --git a/.changelog/v0.11.0/improvements/436-remove-f64.md b/.changelog/v0.11.0/improvements/436-remove-f64.md new file mode 100644 index 00000000000..e55af7ee8f4 --- /dev/null +++ b/.changelog/v0.11.0/improvements/436-remove-f64.md @@ -0,0 +1,2 @@ +- Refactored token decimal formatting. + ([#436](https://github.com/anoma/namada/pull/436)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/570-rpc-sub-vp-pos.md b/.changelog/v0.11.0/improvements/570-rpc-sub-vp-pos.md new file mode 100644 index 00000000000..3abd94115b1 --- /dev/null +++ b/.changelog/v0.11.0/improvements/570-rpc-sub-vp-pos.md @@ -0,0 +1 @@ +- Added PoS specific queries ([#570](https://github.com/anoma/namada/pull/570)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/674-event-log.md b/.changelog/v0.11.0/improvements/674-event-log.md new file mode 100644 index 00000000000..8dc0efaa552 --- /dev/null +++ b/.changelog/v0.11.0/improvements/674-event-log.md @@ -0,0 +1,3 @@ +- Added a custom events store and replaced WebSocket client for + transaction results with query endpoints to the events store. + ([#674](https://github.com/anoma/namada/pull/674)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/719-refactor-governance-storage-api.md b/.changelog/v0.11.0/improvements/719-refactor-governance-storage-api.md new file mode 100644 index 00000000000..fcbbffd213b --- /dev/null +++ b/.changelog/v0.11.0/improvements/719-refactor-governance-storage-api.md @@ -0,0 +1,2 @@ +- Refactored governance code to use storage_api. 
+ ([#719](https://github.com/anoma/namada/pull/719)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/733-core-crate-split.md b/.changelog/v0.11.0/improvements/733-core-crate-split.md new file mode 100644 index 00000000000..6ad3737caea --- /dev/null +++ b/.changelog/v0.11.0/improvements/733-core-crate-split.md @@ -0,0 +1,4 @@ +- Public parts of shared `namada` crate have been split up into a + `namada_core` crate. The `namada_proof_of_stake`, `namada_vp_prelude` + and `namada_tx_prelude` crates now depend on this `namada_core` crate. + ([#733](https://github.com/anoma/namada/pull/733)) diff --git a/.changelog/v0.11.0/improvements/807-smaller-signing.md b/.changelog/v0.11.0/improvements/807-smaller-signing.md new file mode 100644 index 00000000000..1f58798f83e --- /dev/null +++ b/.changelog/v0.11.0/improvements/807-smaller-signing.md @@ -0,0 +1,2 @@ +- Sign over the hash of code rather than code in transaction signing. + ([#807](https://github.com/anoma/namada/pull/807)) \ No newline at end of file diff --git a/.changelog/v0.11.0/miscellaneous/650-last-block.md b/.changelog/v0.11.0/miscellaneous/650-last-block.md new file mode 100644 index 00000000000..bb5f264c551 --- /dev/null +++ b/.changelog/v0.11.0/miscellaneous/650-last-block.md @@ -0,0 +1,2 @@ +- Improve some docstrings relating to block heights + ([#650](https://github.com/anoma/namada/pull/650)) \ No newline at end of file diff --git a/.changelog/v0.11.0/summary.md b/.changelog/v0.11.0/summary.md new file mode 100644 index 00000000000..e9ba3c2763c --- /dev/null +++ b/.changelog/v0.11.0/summary.md @@ -0,0 +1 @@ +Namada 0.11.0 is a scheduled minor release. 
diff --git a/.changelog/unreleased/testing/694-dont-spawn-internal-account-vps.md b/.changelog/v0.11.0/testing/694-dont-spawn-internal-account-vps.md similarity index 100% rename from .changelog/unreleased/testing/694-dont-spawn-internal-account-vps.md rename to .changelog/v0.11.0/testing/694-dont-spawn-internal-account-vps.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 43d9f3ea57c..fa3e5b67ed7 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,6 +1,6 @@ --- name: Bug Report -about: Create a bug report for Anoma. +about: Create a bug report for Namada. labels: bug --- diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 0dbe6b2260a..11a3a12a471 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,6 +1,6 @@ --- name: Feature Request -about: Request a new feature in Anoma. +about: Request a new feature in Namada. 
labels: enhancement --- diff --git a/.github/workflows/build-and-test-bridge.yml b/.github/workflows/build-and-test-bridge.yml index affdf5b523f..2f9929cb0ae 100644 --- a/.github/workflows/build-and-test-bridge.yml +++ b/.github/workflows/build-and-test-bridge.yml @@ -30,7 +30,7 @@ jobs: timeout-minutes: 30 runs-on: ${{ matrix.os }} container: - image: ghcr.io/anoma/namada:wasm-0.8.0 + image: ghcr.io/anoma/namada:wasm-0.11.0 strategy: fail-fast: false matrix: @@ -136,7 +136,7 @@ jobs: BUCKET_NAME: namada-wasm-master AWS_REGION: eu-west-1 - anoma-eth: + namada-eth: runs-on: ${{ matrix.os }} timeout-minutes: 80 needs: [build-wasm] @@ -144,7 +144,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2022-05-20] + nightly_version: [nightly-2022-11-03] mold_version: [1.7.0] make: - name: ABCI @@ -240,7 +240,7 @@ jobs: if: always() run: sccache --stop-server || true - anoma-release-eth: + namada-release-eth: runs-on: ${{ matrix.os }} timeout-minutes: 25 strategy: @@ -342,7 +342,7 @@ jobs: run: sccache --stop-server || true - anoma-e2e-eth: + namada-e2e-eth: runs-on: ${{ matrix.os }} timeout-minutes: 80 strategy: @@ -358,14 +358,14 @@ jobs: cache_key: anoma cache_version: v2 tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 - wait_for: anoma-release-eth (ubuntu-latest, 1.7.0, ABCI Release build, anoma-e2e-release, v2) + wait_for: namada-release-eth (ubuntu-latest, 1.7.0, ABCI Release build, namada-e2e-release, v2) - name: e2e suffix: '' index: 1 cache_key: anoma cache_version: v2 tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 - wait_for: anoma-release-eth (ubuntu-latest, 1.7.0, ABCI Release build, anoma-e2e-release, v2) + wait_for: namada-release-eth (ubuntu-latest, 1.7.0, ABCI Release build, namada-e2e-release, v2) env: CARGO_INCREMENTAL: 0 @@ -466,7 +466,7 @@ jobs: with: name: wasm-${{ github.event.pull_request.head.sha|| github.sha }} path: ./wasm - - name: Download 
anoma binaries + - name: Download namada binaries uses: actions/download-artifact@v3 with: name: binaries${{ matrix.make.suffix }}-${{ github.event.pull_request.head.sha || github.sha }} @@ -481,13 +481,13 @@ jobs: - name: Run e2e test run: python3 .github/workflows/scripts/schedule-e2e.py env: - ANOMA_TENDERMINT_WEBSOCKET_TIMEOUT: 20 - ANOMA_E2E_USE_PREBUILT_BINARIES: "true" - ANOMA_E2E_KEEP_TEMP: "true" - ANOMA_TM_STDOUT: "false" - ANOMA_LOG_COLOR: "false" - ANOMA_MASP_PARAMS_DIR: "/home/runner/work/masp" - ANOMA_LOG: "info" + NAMADA_TENDERMINT_WEBSOCKET_TIMEOUT: 20 + NAMADA_E2E_USE_PREBUILT_BINARIES: "true" + NAMADA_E2E_KEEP_TEMP: "true" + NAMADA_TM_STDOUT: "false" + NAMADA_LOG_COLOR: "false" + NAMADA_MASP_PARAMS_DIR: "/home/runner/work/masp" + NAMADA_LOG: "info" RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" INDEX: ${{ matrix.make.index }} - name: Upload e2e logs @@ -497,11 +497,11 @@ jobs: name: logs-e2e-${{ matrix.make.index }}-${{ github.event.pull_request.head.sha || github.sha }} path: | /tmp/.*/logs/ - /tmp/.*/e2e-test.*/setup/validator-*/.anoma/logs/*.log + /tmp/.*/e2e-test.*/setup/validator-*/.namada/logs/*.log retention-days: 5 - name: Print sccache stats if: always() run: sccache --show-stats - name: Stop sccache server if: always() - run: sccache --stop-server || true \ No newline at end of file + run: sccache --stop-server || true diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index f19d842a4dc..d2be12f7267 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -31,7 +31,7 @@ jobs: timeout-minutes: 30 runs-on: ${{ matrix.os }} container: - image: ghcr.io/anoma/namada:wasm-0.8.0 + image: ghcr.io/anoma/namada:wasm-0.11.0 strategy: fail-fast: false matrix: @@ -138,7 +138,7 @@ jobs: BUCKET_NAME: namada-wasm-master AWS_REGION: eu-west-1 - anoma: + namada: runs-on: ${{ matrix.os }} timeout-minutes: 80 needs: [build-wasm] @@ -146,7 +146,7 @@ jobs: 
fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2022-05-20] + nightly_version: [nightly-2022-11-03] mold_version: [1.7.0] make: - name: ABCI @@ -242,7 +242,7 @@ jobs: if: always() run: sccache --stop-server || true - anoma-release: + namada-release: runs-on: ${{ matrix.os }} timeout-minutes: 25 strategy: @@ -344,7 +344,7 @@ jobs: run: sccache --stop-server || true - anoma-e2e: + namada-e2e: runs-on: ${{ matrix.os }} timeout-minutes: 80 strategy: @@ -360,14 +360,14 @@ jobs: cache_key: anoma cache_version: v2 tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 - wait_for: anoma-release (ubuntu-latest, 1.7.0, ABCI Release build, anoma-e2e-release, v2) + wait_for: namada-release (ubuntu-latest, 1.7.0, ABCI Release build, namada-e2e-release, v2) - name: e2e suffix: '' index: 1 cache_key: anoma cache_version: v2 tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 - wait_for: anoma-release (ubuntu-latest, 1.7.0, ABCI Release build, anoma-e2e-release, v2) + wait_for: namada-release (ubuntu-latest, 1.7.0, ABCI Release build, namada-e2e-release, v2) env: CARGO_INCREMENTAL: 0 @@ -468,7 +468,7 @@ jobs: with: name: wasm-${{ github.event.pull_request.head.sha|| github.sha }} path: ./wasm - - name: Download anoma binaries + - name: Download namada binaries uses: actions/download-artifact@v3 with: name: binaries${{ matrix.make.suffix }}-${{ github.event.pull_request.head.sha || github.sha }} @@ -483,13 +483,13 @@ jobs: - name: Run e2e test run: python3 .github/workflows/scripts/schedule-e2e.py env: - ANOMA_TENDERMINT_WEBSOCKET_TIMEOUT: 20 - ANOMA_E2E_USE_PREBUILT_BINARIES: "true" - ANOMA_E2E_KEEP_TEMP: "true" - ANOMA_TM_STDOUT: "false" - ANOMA_LOG_COLOR: "false" - ANOMA_MASP_PARAMS_DIR: "/home/runner/work/masp" - ANOMA_LOG: "info" + NAMADA_TENDERMINT_WEBSOCKET_TIMEOUT: 20 + NAMADA_E2E_USE_PREBUILT_BINARIES: "true" + NAMADA_E2E_KEEP_TEMP: "true" + NAMADA_TM_STDOUT: "false" + NAMADA_LOG_COLOR: 
"false" + NAMADA_MASP_PARAMS_DIR: "/home/runner/work/masp" + NAMADA_LOG: "info" RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" INDEX: ${{ matrix.make.index }} - name: Upload e2e logs @@ -499,11 +499,11 @@ jobs: name: logs-e2e-${{ matrix.make.index }}-${{ github.event.pull_request.head.sha || github.sha }} path: | /tmp/.*/logs/ - /tmp/.*/e2e-test.*/setup/validator-*/.anoma/logs/*.log + /tmp/.*/e2e-test.*/setup/validator-*/.namada/logs/*.log retention-days: 5 - name: Print sccache stats if: always() run: sccache --show-stats - name: Stop sccache server if: always() - run: sccache --stop-server || true \ No newline at end of file + run: sccache --stop-server || true diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 22792c8d9b0..7364a477fab 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -27,7 +27,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2022-05-20] + nightly_version: [nightly-2022-11-03] make: - name: Clippy command: clippy diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index 51e19208e3a..260b0de4c6a 100644 --- a/.github/workflows/cron.yml +++ b/.github/workflows/cron.yml @@ -20,7 +20,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2022-05-20] + nightly_version: [nightly-2022-11-03] make: - name: Audit command: audit diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index cb8633c99a8..b015bbd7cce 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -27,7 +27,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - nightly_version: [nightly-2022-05-20] + nightly_version: [nightly-2022-11-03] mdbook_version: [rust-lang/mdbook@v0.4.18] mdbook_mermaid: [badboy/mdbook-mermaid@v0.11.1] mdbook_linkcheck: [Michael-F-Bryan/mdbook-linkcheck@v0.7.6] diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5cbc1e9207a..51bd8ea3e81 
100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -19,7 +19,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest] - anoma_cache_version: [v1] + namada_cache_version: [v1] make: - name: Build package command: package @@ -70,8 +70,8 @@ jobs: path: | ~/.cargo/registry ~/.cargo/git - key: ${{ runner.os }}-anoma-release-${{ matrix.anoma_cache_version }}-${{ hashFiles('**/Cargo.lock') }} - restore-keys: ${{ runner.os }}-anoma-release-${{ matrix.anoma_cache_version }} + key: ${{ runner.os }}-namada-release-${{ matrix.namada_cache_version }}-${{ hashFiles('**/Cargo.lock') }} + restore-keys: ${{ runner.os }}-namada-release-${{ matrix.namada_cache_version }} - name: Start sccache server run: sccache --start-server - name: ${{ matrix.make.name }} diff --git a/.github/workflows/scripts/e2e.json b/.github/workflows/scripts/e2e.json index fd8939ba9a0..99a41ad092b 100644 --- a/.github/workflows/scripts/e2e.json +++ b/.github/workflows/scripts/e2e.json @@ -13,7 +13,7 @@ "e2e::ledger_tests::proposal_submission": 35, "e2e::ledger_tests::run_ledger": 5, "e2e::ledger_tests::run_ledger_load_state_and_reset": 5, - "e2e::ledger_tests::test_anoma_shuts_down_if_tendermint_dies": 2, + "e2e::ledger_tests::test_namada_shuts_down_if_tendermint_dies": 2, "e2e::ledger_tests::test_genesis_validators": 9, "e2e::ledger_tests::test_node_connectivity_and_consensus": 20, "e2e::wallet_tests::wallet_address_cmds": 1, diff --git a/.gitignore b/.gitignore index 4718c258b2d..dec9856215f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ -# Anoma default home dir +# Namada default home dir +/.namada /.anoma # Generated by Cargo @@ -7,8 +8,8 @@ debug/ target/ # Release packages -/anoma-*/ -/anoma-*.tar.gz +/namada-*/ +/namada-*.tar.gz # These are backup files generated by rustfmt **/*.rs.bk @@ -26,4 +27,4 @@ target/ wasm/*.wasm # app version string file -/apps/version.rs \ No newline at end of file +/apps/version.rs diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 7a9cf655a04..c11e94955aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,80 @@ # CHANGELOG +## v0.11.0 + +Namada 0.11.0 is a scheduled minor release. + +### BUG FIXES + +- Fix building with the `abcipp` feature again + ([#754](https://github.com/anoma/namada/pull/754)) +- Fixed validation of a validator initialization transaction. + ([#763](https://github.com/anoma/namada/pull/763)) + +### FEATURES + +- Allow to set the native token via genesis configuration. + ([#582](https://github.com/anoma/namada/pull/582)) +- Added a validity predicate for implicit accounts. This is set in + protocol parameters and may be changed via governance. Additionally, + added automatic public key reveal in the client that use an implicit + account that hasn't revealed its PK yet as a source. It's also + possible to manually submit reveal transaction with client command + ([#592](https://github.com/anoma/namada/pull/592)) +- PoS: Removed staking reward addresses in preparation of auto-staked rewards + system. ([#687](https://github.com/anoma/namada/pull/687)) +- Allow to set validator's commission rates and a limit on change of commission + rate per epoch. Commission rate can be changed via a transaction authorized + by the validator, but the limit is immutable value, set when the validator's + account is initialized. ([#695](https://github.com/anoma/namada/pull/695)) +- Optimize the PoS code to depend only on bonded stake, removing + the VotingPower(Delta) structs. This mitigates some previous + information loss in PoS calculations. Instead, the notion of + voting power is only relevant when communicating with Tendermint. 
+ ([#707](https://github.com/anoma/namada/pull/707)) +- Update the set of parameters in the PoS system according to the + latest spec and standardizes the use of the rust_decimal crate + for parameters and calculations that require fractional numbers. + ([#708](https://github.com/anoma/namada/pull/708)) +- Renamed transaction CLI arguments `--fee-amount` and `--fee-token` to `--gas- + amount` and `--gas-token`. ([#775](https://github.com/anoma/namada/pull/775)) + +### IMPROVEMENTS + +- Refactored token decimal formatting. + ([#436](https://github.com/anoma/namada/pull/436)) +- Added PoS specific queries ([#570](https://github.com/anoma/namada/pull/570)) +- Added a custom events store and replaced WebSocket client for + transaction results with query endpoints to the events store. + ([#674](https://github.com/anoma/namada/pull/674)) +- Refactored governance code to use storage_api. + ([#719](https://github.com/anoma/namada/pull/719)) +- Public parts of shared `namada` crate have been split up into a + `namada_core` crate. The `namada_proof_of_stake`, `namada_vp_prelude` + and `namada_tx_prelude` crates now depend on this `namada_core` crate. + ([#733](https://github.com/anoma/namada/pull/733)) +- Sign over the hash of code rather than code in transaction signing. + ([#807](https://github.com/anoma/namada/pull/807)) + +### MISCELLANEOUS + +- Improve some docstrings relating to block heights + ([#650](https://github.com/anoma/namada/pull/650)) + +### TESTING + +- Don't fake a wasm VP for internal addresses in tx tests + ([#694](https://github.com/anoma/namada/pull/694)) + ## v0.10.1 Namada 0.10.1 is a point release with fixes to shielded transactions. +### BUG FIXES + +- Avoid reading from nonexistent storage keys in shielded-to-shielded transfers. 
+ ([#797](https://github.com/anoma/namada/pull/797)) + ## v0.10.0 Namada 0.10.0 is a scheduled minor release, focused on IBC and MASP diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c00d3f08ec8..027c8dc4da6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ -# Contributing to Anoma +# Contributing to Namada -Thank you for the interest in contributing to Anoma! +Thank you for the interest in contributing to Namada! All contributors are expected to follow the [Code of Conduct](CODE_OF_CONDUCT.md). @@ -12,7 +12,7 @@ Every pull request should start with an issue. A pull request should be as atomi ### Changelog -To track changes in Anoma and provide a nicely formatted change log with the releases, we utilize the [unclog CLI tool](https://github.com/informalsystems/unclog). Please do not modify the [change log](CHANGELOG.md) in your PRs, this file will be updated by the repository maintainers. +To track changes in Namada and provide a nicely formatted change log with the releases, we utilize the [unclog CLI tool](https://github.com/informalsystems/unclog). Please do not modify the [change log](CHANGELOG.md) in your PRs, this file will be updated by the repository maintainers. With every PR, please make a separate commit that adds a record in the `.changelog` directory with a section that this PR belongs to together with a high-level description of the change. @@ -37,7 +37,7 @@ unclog add \ --message ``` -The message should be a high-level description of the changes that should explain the scope of the change and affected components to Anoma's users (while git commit messages should target developers). +The message should be a high-level description of the changes that should explain the scope of the change and affected components to Namada's users (while git commit messages should target developers). If none of the sections fit, new sections may be added. 
To find the existing section names, you can use e.g.: diff --git a/Cargo.lock b/Cargo.lock index a8a2609943c..fcfc0371f96 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2424,8 +2424,7 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "funty" version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e" +source = "git+https://github.com/bitvecto-rs/funty/?rev=7ef0d890fbcd8b3def1635ac1a877fc298488446#7ef0d890fbcd8b3def1635ac1a877fc298488446" [[package]] name = "funty" @@ -4031,49 +4030,36 @@ dependencies = [ [[package]] name = "namada" -version = "0.10.1" +version = "0.11.0" dependencies = [ - "ark-bls12-381", - "ark-ec", - "ark-serialize", "assert_matches", "async-trait", - "bech32", "bellman", - "bit-vec", "bls12_381", "borsh", "byte-unit", - "chrono", "circular-queue", "clru", "data-encoding", "derivative", - "ed25519-consensus", - "ethabi", "eyre", - "ferveo", "ferveo-common", - "group-threshold-cryptography", - "hex", "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", - "ics23", "itertools", "libsecp256k1", "loupe", "masp_primitives", "masp_proofs", + "namada_core", "namada_proof_of_stake", - "num-rational 0.4.1", "parity-wasm", "paste", "pretty_assertions", "proptest", "prost", - "prost-types", "pwasm-utils", "rand 0.8.5", "rand_core 0.6.4", @@ -4082,7 +4068,6 @@ dependencies = [ "serde 1.0.147", "serde_json", "sha2 0.9.9", - "sparse-merkle-tree", 
"tempfile", "tendermint 0.23.5", "tendermint 0.23.6", @@ -4092,10 +4077,8 @@ dependencies = [ "tendermint-rpc 0.23.6", "test-log", "thiserror", - "tiny-keccak", "tokio", "toml", - "tonic-build", "tracing 0.1.37", "tracing-subscriber 0.3.16", "wasmer", @@ -4110,7 +4093,7 @@ dependencies = [ [[package]] name = "namada_apps" -version = "0.10.1" +version = "0.11.0" dependencies = [ "ark-serialize", "ark-std", @@ -4165,6 +4148,8 @@ dependencies = [ "rlimit", "rocksdb", "rpassword", + "rust_decimal", + "rust_decimal_macros", "semver 1.0.14", "serde 1.0.147", "serde_bytes", @@ -4202,9 +4187,65 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "namada_core" +version = "0.11.0" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-serialize", + "assert_matches", + "bech32", + "bellman", + "bit-vec", + "borsh", + "chrono", + "data-encoding", + "derivative", + "ed25519-consensus", + "ethabi", + "eyre", + "ferveo", + "ferveo-common", + "group-threshold-cryptography", + "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", + "ibc 0.14.0 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", + "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs?rev=9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d)", + "ibc-proto 0.17.1 (git+https://github.com/heliaxdev/ibc-rs.git?rev=f4703dfe2c1f25cc431279ab74f10f3e0f6827e2)", + "ics23", + "itertools", + "libsecp256k1", + "masp_primitives", + "num-rational 0.4.1", + "pretty_assertions", + "proptest", + "prost", + "prost-types", + "rand 0.8.5", + "rand_core 0.6.4", + "rayon", + "rust_decimal", + "rust_decimal_macros", + "serde 1.0.147", + "serde_json", + "sha2 0.9.9", + "sparse-merkle-tree", + "tendermint 0.23.5", + "tendermint 0.23.6", + "tendermint-proto 0.23.5", + "tendermint-proto 0.23.6", + "test-log", + "thiserror", + "tiny-keccak", + "tonic-build", + "tracing 0.1.37", + 
"tracing-subscriber 0.3.16", + "zeroize", +] + [[package]] name = "namada_encoding_spec" -version = "0.10.1" +version = "0.11.0" dependencies = [ "borsh", "itertools", @@ -4215,7 +4256,7 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.10.1" +version = "0.11.0" dependencies = [ "quote", "syn", @@ -4223,17 +4264,21 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.10.1" +version = "0.11.0" dependencies = [ "borsh", "derivative", + "namada_core", "proptest", + "rust_decimal", + "rust_decimal_macros", "thiserror", + "tracing 0.1.37", ] [[package]] name = "namada_tests" -version = "0.10.1" +version = "0.11.0" dependencies = [ "assert_cmd", "borsh", @@ -4259,6 +4304,8 @@ dependencies = [ "proptest", "prost", "rand 0.8.5", + "rust_decimal", + "rust_decimal_macros", "serde_json", "sha2 0.9.9", "tempfile", @@ -4275,35 +4322,38 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.10.1" +version = "0.11.0" dependencies = [ "borsh", "masp_primitives", - "namada", + "namada_core", "namada_macros", + "namada_proof_of_stake", "namada_vm_env", + "rust_decimal", "sha2 0.10.6", "thiserror", ] [[package]] name = "namada_vm_env" -version = "0.10.1" +version = "0.11.0" dependencies = [ "borsh", "hex", "masp_primitives", "masp_proofs", - "namada", + "namada_core", ] [[package]] name = "namada_vp_prelude" -version = "0.10.1" +version = "0.11.0" dependencies = [ "borsh", - "namada", + "namada_core", "namada_macros", + "namada_proof_of_stake", "namada_vm_env", "sha2 0.10.6", "thiserror", @@ -5812,10 +5862,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" dependencies = [ "arrayvec 0.7.2", + "borsh", "num-traits 0.2.15", "serde 1.0.147", ] +[[package]] +name = "rust_decimal_macros" +version = "1.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4903d8db81d2321699ca8318035d6ff805c548868df435813968795a802171b2" +dependencies = [ + "quote", + "rust_decimal", +] + [[package]] name = "rustc-demangle" version = "0.1.21" diff --git a/Cargo.toml b/Cargo.toml index dea09bc6d02..42a99343fc6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ resolver = "2" members = [ "apps", + "core", "proof_of_stake", "shared", "tests", @@ -51,6 +52,9 @@ ibc-relayer = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2 # patched to a commit on the `eth-bridge-integration` branch of our fork tower-abci = {git = "https://github.com/heliaxdev/tower-abci.git", rev = "fcc0014d0bda707109901abfa1b2f782d242f082"} +# patched to the yanked 1.2.0 until masp updates bitvec +funty = { git = "https://github.com/bitvecto-rs/funty/", rev = "7ef0d890fbcd8b3def1635ac1a877fc298488446" } + [profile.release] lto = true opt-level = 3 diff --git a/Makefile b/Makefile index 81619c475ce..45f8a300fc7 100644 --- a/Makefile +++ b/Makefile @@ -23,13 +23,13 @@ build-test: $(cargo) build --tests build-release: - ANOMA_DEV=false $(cargo) build --release --package namada_apps --manifest-path Cargo.toml + NAMADA_DEV=false $(cargo) build --release --package namada_apps --manifest-path Cargo.toml install-release: - ANOMA_DEV=false $(cargo) install --path ./apps --locked + NAMADA_DEV=false $(cargo) install --path ./apps --locked check-release: - ANOMA_DEV=false $(cargo) check --release --package namada_apps + NAMADA_DEV=false $(cargo) check --release --package namada_apps package: build-release scripts/make-package.sh @@ -41,16 +41,24 @@ check: make -C $(wasms_for_tests) check && \ $(foreach wasm,$(wasm_templates),$(check-wasm) && ) true +check-abcipp: + $(cargo) check \ + --workspace \ + --exclude namada_tests \ + --all-targets \ + --no-default-features \ + --features "abcipp ibc-mocks-abcipp testing" + clippy-wasm = $(cargo) +$(nightly) clippy --manifest-path $(wasm)/Cargo.toml --all-targets 
-- -D warnings clippy: - ANOMA_DEV=false $(cargo) +$(nightly) clippy --all-targets -- -D warnings && \ + NAMADA_DEV=false $(cargo) +$(nightly) clippy --all-targets -- -D warnings && \ make -C $(wasms) clippy && \ make -C $(wasms_for_tests) clippy && \ $(foreach wasm,$(wasm_templates),$(clippy-wasm) && ) true clippy-abcipp: - ANOMA_DEV=false $(cargo) +$(nightly) clippy --all-targets \ + NAMADA_DEV=false $(cargo) +$(nightly) clippy --all-targets \ --manifest-path ./apps/Cargo.toml \ --no-default-features \ --features "std testing abcipp" && \ @@ -58,22 +66,18 @@ clippy-abcipp: --manifest-path ./proof_of_stake/Cargo.toml \ --features "testing" && \ $(cargo) +$(nightly) clippy --all-targets \ - --manifest-path ./shared/Cargo.toml \ + --manifest-path ./core/Cargo.toml \ --no-default-features \ - --features "testing wasm-runtime abcipp ibc-mocks-abcipp" && \ - $(cargo) +$(nightly) clippy \ - --all-targets \ - --manifest-path ./vm_env/Cargo.toml \ + --features "testing wasm-runtime abcipp ibc-mocks-abcipp ferveo-tpke" + $(cargo) +$(nightly) clippy --all-targets \ + --manifest-path ./shared/Cargo.toml \ --no-default-features \ - --features "abcipp" && \ - make -C $(wasms) clippy && \ - $(foreach wasm,$(wasm_templates),$(clippy-wasm) && ) true - + --features "testing wasm-runtime abcipp ibc-mocks-abcipp ferveo-tpke" clippy-fix: $(cargo) +$(nightly) clippy --fix -Z unstable-options --all-targets --allow-dirty --allow-staged install: tendermint - ANOMA_DEV=false $(cargo) install --path ./apps --locked + NAMADA_DEV=false $(cargo) install --path ./apps --locked tendermint: ./scripts/get_tendermint.sh @@ -117,16 +121,22 @@ test-unit-abcipp: --features "testing" \ $(TEST_FILTER) -- \ -Z unstable-options --report-time && \ + $(cargo) test \ + --manifest-path ./core/Cargo.toml \ + --no-default-features \ + --features "testing wasm-runtime abcipp ibc-mocks-abcipp ferveo-tpke" \ + $(TEST_FILTER) -- \ + -Z unstable-options --report-time && \ $(cargo) test \ --manifest-path 
./shared/Cargo.toml \ --no-default-features \ - --features "testing wasm-runtime abcipp ibc-mocks-abcipp" \ + --features "testing wasm-runtime abcipp ibc-mocks-abcipp ferveo-tpke" \ $(TEST_FILTER) -- \ -Z unstable-options --report-time && \ $(cargo) test \ --manifest-path ./vm_env/Cargo.toml \ --no-default-features \ - --features "abcipp" \ + --features "namada_core/abcipp" \ $(TEST_FILTER) -- \ -Z unstable-options --report-time @@ -191,16 +201,18 @@ build-wasm-scripts-docker: build-wasm-image-docker docker run --rm -v ${PWD}:/__w/namada/namada namada-wasm make build-wasm-scripts debug-wasm-scripts-docker: build-wasm-image-docker - docker run --rm -v ${PWD}:/usr/local/rust/wasm anoma-wasm make debug-wasm-scripts + docker run --rm -v ${PWD}:/usr/local/rust/wasm namada-wasm make debug-wasm-scripts # Build the validity predicate and transactions wasm build-wasm-scripts: + rm wasm/*.wasm || true make -C $(wasms) make opt-wasm make checksum-wasm -# Debug build the validity predicate, transactions, matchmaker and matchmaker filter wasm +# Debug build the validity predicate and transactions wasm debug-wasm-scripts: + rm wasm/*.wasm || true make -C $(wasms) debug make opt-wasm make checksum-wasm diff --git a/README.md b/README.md index b96cb34229e..06fe8b96212 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ the form of native protocol tokens. A multi-asset shielded transfer wallet is provided in order to facilitate safe and private user interaction with the protocol. -* Blogpost: [Introducing Namada: Shielded transfers with any assets](https://medium.com/anomanetwork/introducing-namada-shielded-transfers-with-any-assets-dce2e579384c) +* Blogpost: [Introducing Namada: Shielded transfers with any assets](https://medium.com/namadanetwork/introducing-namada-shielded-transfers-with-any-assets-dce2e579384c) ## ๐Ÿ““ Docs @@ -29,13 +29,13 @@ interaction with the protocol. 
## ๐Ÿ’พ Installing -There is a single command to build and install Anoma executables from source (the node, the client and the wallet). This command will also verify that a compatible version of [Tendermint](#dependencies) is available and if not, attempt to install it. Note that currently at least 16GB RAM is needed to build from source. +There is a single command to build and install Namada executables from source (the node, the client and the wallet). This command will also verify that a compatible version of [Tendermint](#dependencies) is available and if not, attempt to install it. Note that currently at least 16GB RAM is needed to build from source. ```shell make install ``` -After installation, the main `anoma` executable will be available on path. +After installation, the main `namada` executable will be available on path. To find how to use it, check out the [User Guide section of the docs](https://docs.namada.net/user-guide/index.html). @@ -49,9 +49,9 @@ Guide. # Build the provided validity predicate and transaction wasm modules make build-wasm-scripts-docker -# Development (debug) build Anoma, which includes a validator and some default +# Development (debug) build Namada, which includes a validator and some default # accounts, whose keys and addresses are available in the wallet -ANOMA_DEV=true make +NAMADA_DEV=true make ``` ### Before submitting a PR, pls make sure to run the following @@ -66,7 +66,7 @@ make clippy ## ๐Ÿงพ Logging -To change the log level, set `ANOMA_LOG` environment variable to one of: +To change the log level, set `NAMADA_LOG` environment variable to one of: * `error` * `warn` diff --git a/apps/Cargo.toml b/apps/Cargo.toml index 300b78a9624..1fc0d75becb 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -6,7 +6,7 @@ license = "GPL-3.0" name = "namada_apps" readme = "../README.md" resolver = "2" -version = "0.10.1" +version = "0.11.0" default-run = "namada" # See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html @@ -19,31 +19,31 @@ path = "src/lib/mod.rs" [[bin]] doc = false name = "namada" -path = "src/bin/anoma/main.rs" +path = "src/bin/namada/main.rs" # Namada node [[bin]] doc = false name = "namadan" -path = "src/bin/anoma-node/main.rs" +path = "src/bin/namada-node/main.rs" # Namada client [[bin]] doc = false name = "namadac" -path = "src/bin/anoma-client/main.rs" +path = "src/bin/namada-client/main.rs" # Namada wallet [[bin]] doc = false name = "namadaw" -path = "src/bin/anoma-wallet/main.rs" +path = "src/bin/namada-wallet/main.rs" # Namada relayer [[bin]] doc = false name = "namadar" -path = "src/bin/anoma-relayer/main.rs" +path = "src/bin/namada-relayer/main.rs" [features] default = ["std", "abciplus"] @@ -52,25 +52,28 @@ std = ["ed25519-consensus/std", "rand/std", "rand_core/std"] # for integration tests and test utilies testing = ["dev"] abcipp = [ + "namada/abcipp", + "namada/tendermint-rpc-abcipp", "tendermint-abcipp", "tendermint-config-abcipp", "tendermint-proto-abcipp", "tendermint-rpc-abcipp", "tower-abci-abcipp", - "namada/abcipp" + "namada/tendermint-abcipp" ] abciplus = [ + "namada/abciplus", + "namada/tendermint-rpc", "tendermint", "tendermint-config", "tendermint-rpc", "tendermint-proto", "tower-abci", - "namada/abciplus" ] [dependencies] -namada = {path = "../shared", features = ["wasm-runtime", "ferveo-tpke", "rand", "tendermint-rpc", "secp256k1-sign-verify"]} +namada = {path = "../shared", default-features = false, features = ["wasm-runtime", "ferveo-tpke"]} ark-serialize = "0.3.0" ark-std = "0.3.0" # branch = "bat/arse-merkle-tree" @@ -156,13 +159,15 @@ winapi = "0.3.9" masp_primitives = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c", features = ["transparent-inputs"] } masp_proofs = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c", features = ["bundled-prover", 
"download-params"] } bimap = {version = "0.6.2", features = ["serde"]} +rust_decimal = "1.26.1" +rust_decimal_macros = "1.26.1" warp = "0.3.2" bytes = "1.1.0" [dev-dependencies] assert_matches = "1.5.0" -namada = {path = "../shared", features = ["testing", "wasm-runtime"]} +namada = {path = "../shared", default-features = false, features = ["testing", "wasm-runtime"]} bit-set = "0.5.2" # A fork with state machime testing proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} diff --git a/apps/build.rs b/apps/build.rs index 32f7c57e875..60735e7e4aa 100644 --- a/apps/build.rs +++ b/apps/build.rs @@ -24,7 +24,7 @@ fn main() { }; let mut version_rs = File::create("./version.rs").expect("cannot write version"); - let pre = "pub fn anoma_version() -> &'static str { \""; + let pre = "pub fn namada_version() -> &'static str { \""; let post = "\" }"; match version_string { Some(version_string) => { @@ -54,10 +54,10 @@ fn main() { // Tell Cargo that if the given file changes, to rerun this build script. println!("cargo:rerun-if-changed={}", PROTO_SRC); - // Tell Cargo to build when the `ANOMA_DEV` env var changes - println!("cargo:rerun-if-env-changed=ANOMA_DEV"); - // Enable "dev" feature if `ANOMA_DEV` is trueish - if let Ok(dev) = env::var("ANOMA_DEV") { + // Tell Cargo to build when the `NAMADA_DEV` env var changes + println!("cargo:rerun-if-env-changed=NAMADA_DEV"); + // Enable "dev" feature if `NAMADA_DEV` is trueish + if let Ok(dev) = env::var("NAMADA_DEV") { if dev.to_ascii_lowercase().trim() == "true" { println!("cargo:rustc-cfg=feature=\"dev\""); } diff --git a/apps/src/bin/anoma-client/cli.rs b/apps/src/bin/namada-client/cli.rs similarity index 86% rename from apps/src/bin/anoma-client/cli.rs rename to apps/src/bin/namada-client/cli.rs index 71cd04c94cb..a5cc70451b3 100644 --- a/apps/src/bin/anoma-client/cli.rs +++ b/apps/src/bin/namada-client/cli.rs @@ -1,4 +1,4 @@ -//! Anoma client CLI. +//! Namada client CLI. 
use color_eyre::eyre::Result; use namada_apps::cli; @@ -6,10 +6,10 @@ use namada_apps::cli::cmds::*; use namada_apps::client::{eth_bridge_pool, rpc, tx, utils}; pub async fn main() -> Result<()> { - match cli::anoma_client_cli()? { - cli::AnomaClient::WithContext(cmd_box) => { + match cli::namada_client_cli()? { + cli::NamadaClient::WithContext(cmd_box) => { let (cmd, ctx) = *cmd_box; - use AnomaClientWithContext as Sub; + use NamadaClientWithContext as Sub; match cmd { // Ledger cmds Sub::TxCustom(TxCustom(args)) => { @@ -36,6 +36,9 @@ pub async fn main() -> Result<()> { Sub::TxVoteProposal(TxVoteProposal(args)) => { tx::submit_vote_proposal(ctx, args).await; } + Sub::TxRevealPk(TxRevealPk(args)) => { + tx::submit_reveal_pk(ctx, args).await; + } Sub::Bond(Bond(args)) => { tx::submit_bond(ctx, args).await; } @@ -68,8 +71,11 @@ pub async fn main() -> Result<()> { Sub::QueryBonds(QueryBonds(args)) => { rpc::query_bonds(ctx, args).await; } - Sub::QueryVotingPower(QueryVotingPower(args)) => { - rpc::query_voting_power(ctx, args).await; + Sub::QueryBondedStake(QueryBondedStake(args)) => { + rpc::query_bonded_stake(ctx, args).await; + } + Sub::QueryCommissionRate(QueryCommissionRate(args)) => { + rpc::query_commission_rate(ctx, args).await; } Sub::QuerySlashes(QuerySlashes(args)) => { rpc::query_slashes(ctx, args).await; @@ -92,7 +98,7 @@ pub async fn main() -> Result<()> { } } } - cli::AnomaClient::WithoutContext(cmd, global_args) => match cmd { + cli::NamadaClient::WithoutContext(cmd, global_args) => match cmd { // Utils cmds Utils::JoinNetwork(JoinNetwork(args)) => { utils::join_network(global_args, args).await diff --git a/apps/src/bin/anoma-client/main.rs b/apps/src/bin/namada-client/main.rs similarity index 100% rename from apps/src/bin/anoma-client/main.rs rename to apps/src/bin/namada-client/main.rs diff --git a/apps/src/bin/anoma-node/cli.rs b/apps/src/bin/namada-node/cli.rs similarity index 86% rename from apps/src/bin/anoma-node/cli.rs rename to 
apps/src/bin/namada-node/cli.rs index d2ce7b608a6..48f67d3273b 100644 --- a/apps/src/bin/anoma-node/cli.rs +++ b/apps/src/bin/namada-node/cli.rs @@ -1,26 +1,26 @@ -//! Anoma node CLI. +//! Namada node CLI. use eyre::{Context, Result}; use namada_apps::cli::{self, cmds}; use namada_apps::node::ledger; pub fn main() -> Result<()> { - let (cmd, mut ctx) = cli::anoma_node_cli()?; + let (cmd, mut ctx) = cli::namada_node_cli()?; if let Some(mode) = ctx.global_args.mode.clone() { ctx.config.ledger.tendermint.tendermint_mode = mode; } match cmd { - cmds::AnomaNode::Ledger(sub) => match sub { + cmds::NamadaNode::Ledger(sub) => match sub { cmds::Ledger::Run(_) => { let wasm_dir = ctx.wasm_dir(); ledger::run(ctx.config.ledger, wasm_dir); } cmds::Ledger::Reset(_) => { ledger::reset(ctx.config.ledger) - .wrap_err("Failed to reset Anoma node")?; + .wrap_err("Failed to reset Namada node")?; } }, - cmds::AnomaNode::Config(sub) => match sub { + cmds::NamadaNode::Config(sub) => match sub { cmds::Config::Gen(cmds::ConfigGen) => { // If the config doesn't exit, it gets generated in the context. // In here, we just need to overwrite the default chain ID, in diff --git a/apps/src/bin/anoma-node/main.rs b/apps/src/bin/namada-node/main.rs similarity index 100% rename from apps/src/bin/anoma-node/main.rs rename to apps/src/bin/namada-node/main.rs diff --git a/apps/src/bin/anoma-relayer/cli.rs b/apps/src/bin/namada-relayer/cli.rs similarity index 86% rename from apps/src/bin/anoma-relayer/cli.rs rename to apps/src/bin/namada-relayer/cli.rs index 4dbab91796c..16576a2a82b 100644 --- a/apps/src/bin/anoma-relayer/cli.rs +++ b/apps/src/bin/namada-relayer/cli.rs @@ -1,4 +1,4 @@ -//! Anoma client CLI. +//! Namada client CLI. 
use color_eyre::eyre::Result; use namada_apps::cli; @@ -6,7 +6,7 @@ use namada_apps::cli::cmds; use namada_apps::client::eth_bridge_pool; pub async fn main() -> Result<()> { - let (cmd, _) = cli::anoma_relayer_cli()?; + let (cmd, _) = cli::namada_relayer_cli()?; use cmds::EthBridgePool as Sub; match cmd { Sub::ConstructProof(args) => { diff --git a/apps/src/bin/anoma-relayer/main.rs b/apps/src/bin/namada-relayer/main.rs similarity index 100% rename from apps/src/bin/anoma-relayer/main.rs rename to apps/src/bin/namada-relayer/main.rs diff --git a/apps/src/bin/anoma-wallet/README.md b/apps/src/bin/namada-wallet/README.md similarity index 95% rename from apps/src/bin/anoma-wallet/README.md rename to apps/src/bin/namada-wallet/README.md index 147cecd4fe2..ec42ae2346c 100644 --- a/apps/src/bin/anoma-wallet/README.md +++ b/apps/src/bin/namada-wallet/README.md @@ -1,4 +1,4 @@ -# Anoma CLI wallet +# Namada CLI wallet ## Features diff --git a/apps/src/bin/anoma-wallet/cli.rs b/apps/src/bin/namada-wallet/cli.rs similarity index 98% rename from apps/src/bin/anoma-wallet/cli.rs rename to apps/src/bin/namada-wallet/cli.rs index 970cf8d9080..82a994b0aca 100644 --- a/apps/src/bin/anoma-wallet/cli.rs +++ b/apps/src/bin/namada-wallet/cli.rs @@ -1,4 +1,4 @@ -//! Anoma Wallet CLI. +//! Namada Wallet CLI. 
use std::fs::File; use std::io::{self, Write}; @@ -16,9 +16,9 @@ use namada_apps::wallet::{DecryptionError, FindKeyError}; use rand_core::OsRng; pub fn main() -> Result<()> { - let (cmd, ctx) = cli::anoma_wallet_cli()?; + let (cmd, ctx) = cli::namada_wallet_cli()?; match cmd { - cmds::AnomaWallet::Key(sub) => match sub { + cmds::NamadaWallet::Key(sub) => match sub { cmds::WalletKey::Gen(cmds::KeyGen(args)) => { key_and_address_gen(ctx, args) } @@ -28,7 +28,7 @@ pub fn main() -> Result<()> { key_export(ctx, args) } }, - cmds::AnomaWallet::Address(sub) => match sub { + cmds::NamadaWallet::Address(sub) => match sub { cmds::WalletAddress::Gen(cmds::AddressGen(args)) => { key_and_address_gen(ctx, args) } @@ -40,7 +40,7 @@ pub fn main() -> Result<()> { address_add(ctx, args) } }, - cmds::AnomaWallet::Masp(sub) => match sub { + cmds::NamadaWallet::Masp(sub) => match sub { cmds::WalletMasp::GenSpendKey(cmds::MaspGenSpendKey(args)) => { spending_key_gen(ctx, args) } @@ -455,8 +455,7 @@ fn address_or_alias_find(ctx: Context, args: args::AddressOrAliasFind) { message." ); } else if args.alias.is_some() { - if let Some(address) = - wallet.find_address(&args.alias.as_ref().unwrap()) + if let Some(address) = wallet.find_address(args.alias.as_ref().unwrap()) { println!("Found address {}", address.to_pretty_string()); } else { diff --git a/apps/src/bin/anoma-wallet/main.rs b/apps/src/bin/namada-wallet/main.rs similarity index 100% rename from apps/src/bin/anoma-wallet/main.rs rename to apps/src/bin/namada-wallet/main.rs diff --git a/apps/src/bin/anoma/cli.rs b/apps/src/bin/namada/cli.rs similarity index 81% rename from apps/src/bin/anoma/cli.rs rename to apps/src/bin/namada/cli.rs index 5fbf363c33d..88d09da0cf1 100644 --- a/apps/src/bin/anoma/cli.rs +++ b/apps/src/bin/namada/cli.rs @@ -1,8 +1,8 @@ -//! Anoma CLI. +//! Namada CLI. //! //! This CLI groups together the most commonly used commands inlined from the //! node and the client. 
The other commands for the node, client and wallet can -//! be dispatched via `anoma node ...`, `anoma client ...` or `anoma wallet +//! be dispatched via `namada node ...`, `namada client ...` or `namada wallet //! ...`, respectively. use std::env; @@ -12,18 +12,18 @@ use eyre::Result; use namada_apps::cli; pub fn main() -> Result<()> { - let (cmd, raw_sub_cmd) = cli::anoma_cli(); + let (cmd, raw_sub_cmd) = cli::namada_cli(); handle_command(cmd, raw_sub_cmd) } -fn handle_command(cmd: cli::cmds::Anoma, raw_sub_cmd: String) -> Result<()> { +fn handle_command(cmd: cli::cmds::Namada, raw_sub_cmd: String) -> Result<()> { let args = env::args(); let is_bin_sub_cmd = matches!( cmd, - cli::cmds::Anoma::Node(_) - | cli::cmds::Anoma::Client(_) - | cli::cmds::Anoma::Wallet(_) + cli::cmds::Namada::Node(_) + | cli::cmds::Namada::Client(_) + | cli::cmds::Namada::Wallet(_) ); // Skip the first arg, which is the name of the binary @@ -39,20 +39,21 @@ fn handle_command(cmd: cli::cmds::Anoma, raw_sub_cmd: String) -> Result<()> { } match cmd { - cli::cmds::Anoma::Node(_) | cli::cmds::Anoma::Ledger(_) => { + cli::cmds::Namada::Node(_) | cli::cmds::Namada::Ledger(_) => { handle_subcommand("namadan", sub_args) } - cli::cmds::Anoma::Client(_) - | cli::cmds::Anoma::TxCustom(_) - | cli::cmds::Anoma::TxTransfer(_) - | cli::cmds::Anoma::TxIbcTransfer(_) - | cli::cmds::Anoma::TxUpdateVp(_) - | cli::cmds::Anoma::TxInitProposal(_) - | cli::cmds::Anoma::TxVoteProposal(_) => { + cli::cmds::Namada::Client(_) + | cli::cmds::Namada::TxCustom(_) + | cli::cmds::Namada::TxTransfer(_) + | cli::cmds::Namada::TxIbcTransfer(_) + | cli::cmds::Namada::TxUpdateVp(_) + | cli::cmds::Namada::TxRevealPk(_) + | cli::cmds::Namada::TxInitProposal(_) + | cli::cmds::Namada::TxVoteProposal(_) => { handle_subcommand("namadac", sub_args) } - cli::cmds::Anoma::Wallet(_) => handle_subcommand("namadaw", sub_args), - cli::cmds::Anoma::EthBridgePool(_) => { + cli::cmds::Namada::Wallet(_) => handle_subcommand("namadaw", 
sub_args), + cli::cmds::Namada::EthBridgePool(_) => { handle_subcommand("namadar", sub_args) } } @@ -73,8 +74,8 @@ fn handle_subcommand(program: &str, mut sub_args: Vec) -> Result<()> { } else { // Get the full path to the program to be inside the parent directory of // the current process - let anoma_path = env::current_exe()?; - anoma_path.parent().unwrap().join(program) + let namada_path = env::current_exe()?; + namada_path.parent().unwrap().join(program) }; let mut cmd = Command::new(cmd_name); diff --git a/apps/src/bin/anoma/main.rs b/apps/src/bin/namada/main.rs similarity index 100% rename from apps/src/bin/anoma/main.rs rename to apps/src/bin/namada/main.rs diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 41a21cf6387..9607d3e2762 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -1,9 +1,9 @@ -//! The CLI commands that are re-used between the executables `anoma`, -//! `anoma-node` and `anoma-client`. +//! The CLI commands that are re-used between the executables `namada`, +//! `namada-node` and `namada-client`. //! -//! The `anoma` executable groups together the most commonly used commands +//! The `namada` executable groups together the most commonly used commands //! inlined from the node and the client. The other commands for the node or the -//! client can be dispatched via `anoma node ...` or `anoma client ...`, +//! client can be dispatched via `namada node ...` or `namada client ...`, //! respectively. pub mod context; @@ -20,7 +20,7 @@ include!("../../version.rs"); const APP_NAME: &str = "Namada"; -// Main Anoma sub-commands +// Main Namada sub-commands const NODE_CMD: &str = "node"; const CLIENT_CMD: &str = "client"; const WALLET_CMD: &str = "wallet"; @@ -33,14 +33,14 @@ pub mod cmds { use super::{args, ArgMatches, CLIENT_CMD, NODE_CMD, WALLET_CMD}; use crate::cli::BRIDGE_POOL_CMD; - /// Commands for `anoma` binary. + /// Commands for `namada` binary. 
#[allow(clippy::large_enum_variant)] #[derive(Clone, Debug)] - pub enum Anoma { + pub enum Namada { // Sub-binary-commands - Node(AnomaNode), - Client(AnomaClient), - Wallet(AnomaWallet), + Node(NamadaNode), + Client(NamadaClient), + Wallet(NamadaWallet), // Inlined commands from the node. EthBridgePool(EthBridgePool), @@ -53,13 +53,14 @@ pub mod cmds { TxUpdateVp(TxUpdateVp), TxInitProposal(TxInitProposal), TxVoteProposal(TxVoteProposal), + TxRevealPk(TxRevealPk), } - impl Cmd for Anoma { + impl Cmd for Namada { fn add_sub(app: App) -> App { - app.subcommand(AnomaNode::def()) - .subcommand(AnomaClient::def()) - .subcommand(AnomaWallet::def()) + app.subcommand(NamadaNode::def()) + .subcommand(NamadaClient::def()) + .subcommand(NamadaWallet::def()) .subcommand(EthBridgePool::def()) .subcommand(Ledger::def()) .subcommand(TxCustom::def()) @@ -68,6 +69,7 @@ pub mod cmds { .subcommand(TxUpdateVp::def()) .subcommand(TxInitProposal::def()) .subcommand(TxVoteProposal::def()) + .subcommand(TxRevealPk::def()) } fn parse(matches: &ArgMatches) -> Option { @@ -84,6 +86,7 @@ pub mod cmds { SubCmd::parse(matches).map(Self::TxInitProposal); let tx_vote_proposal = SubCmd::parse(matches).map(Self::TxVoteProposal); + let tx_reveal_pk = SubCmd::parse(matches).map(Self::TxRevealPk); node.or(client) .or(wallet) .or(ledger) @@ -93,19 +96,20 @@ pub mod cmds { .or(tx_update_vp) .or(tx_init_proposal) .or(tx_vote_proposal) + .or(tx_reveal_pk) } } - /// Used as top-level commands (`Cmd` instance) in `anoman` binary. - /// Used as sub-commands (`SubCmd` instance) in `anoma` binary. + /// Used as top-level commands (`Cmd` instance) in `namadan` binary. + /// Used as sub-commands (`SubCmd` instance) in `namada` binary. 
#[derive(Clone, Debug)] #[allow(clippy::large_enum_variant)] - pub enum AnomaNode { + pub enum NamadaNode { Ledger(Ledger), Config(Config), } - impl Cmd for AnomaNode { + impl Cmd for NamadaNode { fn add_sub(app: App) -> App { app.subcommand(Ledger::def()).subcommand(Config::def()) } @@ -116,7 +120,7 @@ pub mod cmds { ledger.or(config) } } - impl SubCmd for AnomaNode { + impl SubCmd for NamadaNode { const CMD: &'static str = NODE_CMD; fn parse(matches: &ArgMatches) -> Option { @@ -134,20 +138,20 @@ pub mod cmds { } } - /// Used as top-level commands (`Cmd` instance) in `anomac` binary. - /// Used as sub-commands (`SubCmd` instance) in `anoma` binary. + /// Used as top-level commands (`Cmd` instance) in `namadac` binary. + /// Used as sub-commands (`SubCmd` instance) in `namada` binary. #[derive(Clone, Debug)] #[allow(clippy::large_enum_variant)] - pub enum AnomaClient { + pub enum NamadaClient { /// The [`super::Context`] provides access to the wallet and the /// config. It will generate a new wallet and config, if they /// don't exist. - WithContext(AnomaClientWithContext), + WithContext(NamadaClientWithContext), /// Utils don't have [`super::Context`], only the global arguments. 
WithoutContext(Utils), } - impl Cmd for AnomaClient { + impl Cmd for NamadaClient { fn add_sub(app: App) -> App { app // Simple transactions @@ -156,11 +160,12 @@ pub mod cmds { .subcommand(TxIbcTransfer::def().display_order(1)) .subcommand(TxUpdateVp::def().display_order(1)) .subcommand(TxInitAccount::def().display_order(1)) - .subcommand(TxInitValidator::def().display_order(1)) + .subcommand(TxRevealPk::def().display_order(1)) // Proposal transactions .subcommand(TxInitProposal::def().display_order(1)) .subcommand(TxVoteProposal::def().display_order(1)) // PoS transactions + .subcommand(TxInitValidator::def().display_order(2)) .subcommand(Bond::def().display_order(2)) .subcommand(Unbond::def().display_order(2)) .subcommand(Withdraw::def().display_order(2)) @@ -173,7 +178,7 @@ pub mod cmds { .subcommand(QueryBlock::def().display_order(3)) .subcommand(QueryBalance::def().display_order(3)) .subcommand(QueryBonds::def().display_order(3)) - .subcommand(QueryVotingPower::def().display_order(3)) + .subcommand(QueryBondedStake::def().display_order(3)) .subcommand(QuerySlashes::def().display_order(3)) .subcommand(QueryResult::def().display_order(3)) .subcommand(QueryRawBytes::def().display_order(3)) @@ -185,7 +190,7 @@ pub mod cmds { } fn parse(matches: &ArgMatches) -> Option { - use AnomaClientWithContext::*; + use NamadaClientWithContext::*; let tx_custom = Self::parse_with_ctx(matches, TxCustom); let tx_transfer = Self::parse_with_ctx(matches, TxTransfer); let tx_ibc_transfer = Self::parse_with_ctx(matches, TxIbcTransfer); @@ -193,6 +198,7 @@ pub mod cmds { let tx_init_account = Self::parse_with_ctx(matches, TxInitAccount); let tx_init_validator = Self::parse_with_ctx(matches, TxInitValidator); + let tx_reveal_pk = Self::parse_with_ctx(matches, TxRevealPk); let tx_init_proposal = Self::parse_with_ctx(matches, TxInitProposal); let tx_vote_proposal = @@ -207,8 +213,8 @@ pub mod cmds { let query_block = Self::parse_with_ctx(matches, QueryBlock); let query_balance = 
Self::parse_with_ctx(matches, QueryBalance); let query_bonds = Self::parse_with_ctx(matches, QueryBonds); - let query_voting_power = - Self::parse_with_ctx(matches, QueryVotingPower); + let query_bonded_stake = + Self::parse_with_ctx(matches, QueryBondedStake); let query_slashes = Self::parse_with_ctx(matches, QuerySlashes); let query_result = Self::parse_with_ctx(matches, QueryResult); let query_raw_bytes = Self::parse_with_ctx(matches, QueryRawBytes); @@ -225,9 +231,10 @@ pub mod cmds { .or(tx_ibc_transfer) .or(tx_update_vp) .or(tx_init_account) - .or(tx_init_validator) + .or(tx_reveal_pk) .or(tx_init_proposal) .or(tx_vote_proposal) + .or(tx_init_validator) .or(bond) .or(unbond) .or(withdraw) @@ -238,7 +245,7 @@ pub mod cmds { .or(query_block) .or(query_balance) .or(query_bonds) - .or(query_voting_power) + .or(query_bonded_stake) .or(query_slashes) .or(query_result) .or(query_raw_bytes) @@ -249,18 +256,18 @@ pub mod cmds { } } - impl AnomaClient { + impl NamadaClient { /// A helper method to parse sub cmds with context fn parse_with_ctx( matches: &ArgMatches, - sub_to_self: impl Fn(T) -> AnomaClientWithContext, + sub_to_self: impl Fn(T) -> NamadaClientWithContext, ) -> Option { SubCmd::parse(matches) .map(|sub| Self::WithContext(sub_to_self(sub))) } } - impl SubCmd for AnomaClient { + impl SubCmd for NamadaClient { const CMD: &'static str = CLIENT_CMD; fn parse(matches: &ArgMatches) -> Option { @@ -279,7 +286,7 @@ pub mod cmds { } #[derive(Clone, Debug)] - pub enum AnomaClientWithContext { + pub enum NamadaClientWithContext { // Ledger cmds TxCustom(TxCustom), TxTransfer(TxTransfer), @@ -290,6 +297,7 @@ pub mod cmds { TxInitValidator(TxInitValidator), TxInitProposal(TxInitProposal), TxVoteProposal(TxVoteProposal), + TxRevealPk(TxRevealPk), Bond(Bond), Unbond(Unbond), Withdraw(Withdraw), @@ -300,7 +308,8 @@ pub mod cmds { QueryBlock(QueryBlock), QueryBalance(QueryBalance), QueryBonds(QueryBonds), - QueryVotingPower(QueryVotingPower), + 
QueryBondedStake(QueryBondedStake), + QueryCommissionRate(QueryCommissionRate), QuerySlashes(QuerySlashes), QueryRawBytes(QueryRawBytes), QueryProposal(QueryProposal), @@ -310,7 +319,7 @@ pub mod cmds { #[allow(clippy::large_enum_variant)] #[derive(Clone, Debug)] - pub enum AnomaWallet { + pub enum NamadaWallet { /// Key management commands Key(WalletKey), /// Address management commands @@ -319,7 +328,7 @@ pub mod cmds { Masp(WalletMasp), } - impl Cmd for AnomaWallet { + impl Cmd for NamadaWallet { fn add_sub(app: App) -> App { app.subcommand(WalletKey::def()) .subcommand(WalletAddress::def()) @@ -334,7 +343,7 @@ pub mod cmds { } } - impl SubCmd for AnomaWallet { + impl SubCmd for NamadaWallet { const CMD: &'static str = WALLET_CMD; fn parse(matches: &ArgMatches) -> Option { @@ -793,7 +802,7 @@ pub mod cmds { } fn def() -> App { - App::new(Self::CMD).about("Run Anoma ledger node.") + App::new(Self::CMD).about("Run Namada ledger node.") } } @@ -809,7 +818,7 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD).about( - "Delete Anoma ledger node's and Tendermint node's storage \ + "Delete Namada ledger node's and Tendermint node's storage \ data.", ) } @@ -1056,8 +1065,8 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about( - "Send a signed transaction to create a new validator and \ - its staking reward account.", + "Send a signed transaction to create a new validator \ + account.", ) .add_args::() } @@ -1216,21 +1225,21 @@ pub mod cmds { } #[derive(Clone, Debug)] - pub struct QueryVotingPower(pub args::QueryVotingPower); + pub struct QueryBondedStake(pub args::QueryBondedStake); - impl SubCmd for QueryVotingPower { - const CMD: &'static str = "voting-power"; + impl SubCmd for QueryBondedStake { + const CMD: &'static str = "bonded-stake"; fn parse(matches: &ArgMatches) -> Option { matches.subcommand_matches(Self::CMD).map(|matches| { - QueryVotingPower(args::QueryVotingPower::parse(matches)) + QueryBondedStake(args::QueryBondedStake::parse(matches)) }) 
} fn def() -> App { App::new(Self::CMD) - .about("Query PoS voting power.") - .add_args::() + .about("Query PoS bonded stake.") + .add_args::() } } @@ -1253,6 +1262,25 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct QueryCommissionRate(pub args::QueryCommissionRate); + + impl SubCmd for QueryCommissionRate { + const CMD: &'static str = "commission-rate"; + + fn parse(matches: &ArgMatches) -> Option { + matches.subcommand_matches(Self::CMD).map(|matches| { + QueryCommissionRate(args::QueryCommissionRate::parse(matches)) + }) + } + + fn def() -> App { + App::new(Self::CMD) + .about("Query commission rate.") + .add_args::() + } + } + #[derive(Clone, Debug)] pub struct QuerySlashes(pub args::QuerySlashes); @@ -1338,6 +1366,36 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct TxRevealPk(pub args::RevealPk); + + impl SubCmd for TxRevealPk { + const CMD: &'static str = "reveal-pk"; + + fn parse(matches: &ArgMatches) -> Option + where + Self: Sized, + { + matches + .subcommand_matches(Self::CMD) + .map(|matches| TxRevealPk(args::RevealPk::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about( + "Submit a tx to reveal the public key an implicit \ + account. Typically, you don't have to do this manually \ + and the client will detect when a tx to reveal PK is \ + needed and submit it automatically. 
This will write the \ + PK into the account's storage so that it can be used for \ + signature verification on transactions authorized by \ + this account.", + ) + .add_args::() + } + } + #[derive(Clone, Debug)] pub enum Utils { JoinNetwork(JoinNetwork), @@ -1390,7 +1448,7 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) - .about("Configure Anoma to join an existing network.") + .about("Configure Namada to join an existing network.") .add_args::() } } @@ -1448,15 +1506,15 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) .about( - "Initialize genesis validator's address, staking reward \ - address, consensus key, validator account key and \ - staking rewards key and use it in the ledger's node.", + "Initialize genesis validator's address, consensus key \ + and validator account key and use it in the ledger's \ + node.", ) .add_args::() } } - /// Used as sub-commands (`SubCmd` instance) in `anoma` binary. + /// Used as sub-commands (`SubCmd` instance) in `namada` binary. #[derive(Clone, Debug)] pub enum EthBridgePool { /// Construct a proof that a set of transfers is in the pool. 
@@ -1582,6 +1640,7 @@ pub mod args { use namada::types::token; use namada::types::token::Amount; use namada::types::transaction::GasLimit; + use rust_decimal::Decimal; use super::context::*; use super::utils::*; @@ -1601,7 +1660,7 @@ pub mod args { const BALANCE_OWNER: ArgOpt = arg_opt("owner"); const BASE_DIR: ArgDefault = arg_default( "base-dir", - DefaultFn(|| match env::var("ANOMA_BASE_DIR") { + DefaultFn(|| match env::var("NAMADA_BASE_DIR") { Ok(dir) => dir.into(), Err(_) => config::DEFAULT_BASE_DIR.into(), }), @@ -1613,6 +1672,7 @@ pub mod args { const CHANNEL_ID: Arg = arg("channel-id"); const CODE_PATH: Arg = arg("code-path"); const CODE_PATH_OPT: ArgOpt = CODE_PATH.opt(); + const COMMISSION_RATE: Arg = arg("commission-rate"); const CONSENSUS_TIMEOUT_COMMIT: ArgDefault = arg_default( "consensus-timeout-commit", DefaultFn(|| Timeout::from_str("1s").unwrap()), @@ -1651,6 +1711,8 @@ pub mod args { const LEDGER_ADDRESS: Arg = arg("ledger-address"); const LOCALHOST: ArgFlag = flag("localhost"); const MASP_VALUE: Arg = arg("value"); + const MAX_COMMISSION_RATE_CHANGE: Arg = + arg("max-commission-rate-change"); const MODE: ArgOpt = arg_opt("mode"); const NET_ADDRESS: Arg = arg("net-address"); const NO_CONVERSIONS: ArgFlag = flag("no-conversions"); @@ -1671,8 +1733,6 @@ pub mod args { const RAW_ADDRESS_OPT: ArgOpt
= RAW_ADDRESS.opt(); const RAW_PUBLIC_KEY_OPT: ArgOpt = arg_opt("public-key"); const RECEIVER: Arg = arg("receiver"); - const REWARDS_CODE_PATH: ArgOpt = arg_opt("rewards-code-path"); - const REWARDS_KEY: ArgOpt = arg_opt("rewards-key"); const SCHEME: ArgDefault = arg_default("scheme", DefaultFn(|| SchemeType::Ed25519)); const SIGNER: ArgOpt = arg_opt("signer"); @@ -1737,18 +1797,18 @@ pub mod args { .arg(BASE_DIR.def().about( "The base directory is where the nodes, client and wallet \ configuration and state is stored. This value can also \ - be set via `ANOMA_BASE_DIR` environment variable, but \ + be set via `NAMADA_BASE_DIR` environment variable, but \ the argument takes precedence, if specified. Defaults to \ - `.anoma`.", + `.namada`.", )) .arg(WASM_DIR.def().about( "Directory with built WASM validity predicates, \ transactions. This value can also be set via \ - `ANOMA_WASM_DIR` environment variable, but the argument \ + `NAMADA_WASM_DIR` environment variable, but the argument \ takes precedence, if specified.", )) .arg(MODE.def().about( - "The mode in which to run Anoma. Options are \n\t * \ + "The mode in which to run Namada. 
Options are \n\t * \ Validator (default)\n\t * Full\n\t * Seed", )) } @@ -2117,10 +2177,10 @@ pub mod args { pub consensus_key: Option, pub eth_cold_key: Option, pub eth_hot_key: Option, - pub rewards_account_key: Option, pub protocol_key: Option, + pub commission_rate: Decimal, + pub max_commission_rate_change: Decimal, pub validator_vp_code_path: Option, - pub rewards_vp_code_path: Option, pub unsafe_dont_encrypt: bool, } @@ -2133,10 +2193,11 @@ pub mod args { let consensus_key = VALIDATOR_CONSENSUS_KEY.parse(matches); let eth_cold_key = VALIDATOR_ETH_COLD_KEY.parse(matches); let eth_hot_key = VALIDATOR_ETH_HOT_KEY.parse(matches); - let rewards_account_key = REWARDS_KEY.parse(matches); let protocol_key = PROTOCOL_KEY.parse(matches); + let commission_rate = COMMISSION_RATE.parse(matches); + let max_commission_rate_change = + MAX_COMMISSION_RATE_CHANGE.parse(matches); let validator_vp_code_path = VALIDATOR_CODE_PATH.parse(matches); - let rewards_vp_code_path = REWARDS_CODE_PATH.parse(matches); let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); Self { tx, @@ -2146,10 +2207,10 @@ pub mod args { consensus_key, eth_cold_key, eth_hot_key, - rewards_account_key, protocol_key, + commission_rate, + max_commission_rate_change, validator_vp_code_path, - rewards_vp_code_path, unsafe_dont_encrypt, } } @@ -2182,24 +2243,26 @@ pub mod args { be generated if none given. Note that this must be \ secp256k1.", )) - .arg(REWARDS_KEY.def().about( - "A public key for the staking reward account. A new one \ - will be generated if none given.", - )) .arg(PROTOCOL_KEY.def().about( "A public key for signing protocol transactions. A new \ one will be generated if none given.", )) + .arg(COMMISSION_RATE.def().about( + "The commission rate charged by the validator for \ + delegation rewards. Expressed as a decimal between 0 and \ + 1. 
This is a required parameter.", + )) + .arg(MAX_COMMISSION_RATE_CHANGE.def().about( + "The maximum change per epoch in the commission rate \ + charged by the validator for delegation rewards. \ + Expressed as a decimal between 0 and 1. This is a \ + required parameter.", + )) .arg(VALIDATOR_CODE_PATH.def().about( "The path to the validity predicate WASM code to be used \ for the validator account. Uses the default validator VP \ if none specified.", )) - .arg(REWARDS_CODE_PATH.def().about( - "The path to the validity predicate WASM code to be used \ - for the staking reward account. Uses the default staking \ - reward VP if none specified.", - )) .arg(UNSAFE_DONT_ENCRYPT.def().about( "UNSAFE: Do not encrypt the generated keypairs. Do not \ use this for keys used in a live network.", @@ -2428,6 +2491,28 @@ pub mod args { } } + #[derive(Clone, Debug)] + pub struct RevealPk { + /// Common tx arguments + pub tx: Tx, + /// A public key to be revealed on-chain + pub public_key: WalletPublicKey, + } + + impl Args for RevealPk { + fn parse(matches: &ArgMatches) -> Self { + let tx = Tx::parse(matches); + let public_key = PUBLIC_KEY.parse(matches); + + Self { tx, public_key } + } + + fn def(app: App) -> App { + app.add_args::() + .arg(PUBLIC_KEY.def().about("A public key to reveal.")) + } + } + #[derive(Clone, Debug)] pub struct QueryProposal { /// Common query args @@ -2720,18 +2805,18 @@ pub mod args { } } - /// Query PoS voting power + /// Query PoS bonded stake #[derive(Clone, Debug)] - pub struct QueryVotingPower { + pub struct QueryBondedStake { /// Common query args pub query: Query, /// Address of a validator pub validator: Option, - /// Epoch in which to find voting power + /// Epoch in which to find bonded stake pub epoch: Option, } - impl Args for QueryVotingPower { + impl Args for QueryBondedStake { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); let validator = VALIDATOR_OPT.parse(matches); @@ -2746,7 +2831,78 @@ pub mod args { fn 
def(app: App) -> App { app.add_args::() .arg(VALIDATOR_OPT.def().about( - "The validator's address whose voting power to query.", + "The validator's address whose bonded stake to query.", + )) + .arg(EPOCH.def().about( + "The epoch at which to query (last committed, if not \ + specified).", + )) + } + } + + #[derive(Clone, Debug)] + /// Commission rate change args + pub struct TxCommissionRateChange { + /// Common tx arguments + pub tx: Tx, + /// Validator address (should be self) + pub validator: WalletAddress, + /// Value to which the tx changes the commission rate + pub rate: Decimal, + } + + impl Args for TxCommissionRateChange { + fn parse(matches: &ArgMatches) -> Self { + let tx = Tx::parse(matches); + let validator = VALIDATOR.parse(matches); + let rate = COMMISSION_RATE.parse(matches); + Self { + tx, + validator, + rate, + } + } + + fn def(app: App) -> App { + app.add_args::() + .arg(VALIDATOR.def().about( + "The validator's address whose commission rate to change.", + )) + .arg( + COMMISSION_RATE + .def() + .about("The desired new commission rate."), + ) + } + } + + /// Query PoS commission rate + #[derive(Clone, Debug)] + pub struct QueryCommissionRate { + /// Common query args + pub query: Query, + /// Address of a validator + pub validator: WalletAddress, + /// Epoch in which to find commission rate + pub epoch: Option, + } + + impl Args for QueryCommissionRate { + fn parse(matches: &ArgMatches) -> Self { + let query = Query::parse(matches); + let validator = VALIDATOR.parse(matches); + let epoch = EPOCH.parse(matches); + Self { + query, + validator, + epoch, + } + } + + fn def(app: App) -> App { + app.add_args::() + .arg(VALIDATOR.def().about( + "The validator's address whose commission rate to query.", )) .arg(EPOCH.def().about( "The epoch at which to query (last committed, if not \ @@ -3440,6 +3596,8 @@ pub mod args { #[derive(Clone, Debug)] pub struct InitGenesisValidator { pub alias: String, + pub commission_rate: Decimal, + pub 
max_commission_rate_change: Decimal, pub net_address: SocketAddr, pub unsafe_dont_encrypt: bool, pub key_scheme: SchemeType, @@ -3448,6 +3606,9 @@ pub mod args { impl Args for InitGenesisValidator { fn parse(matches: &ArgMatches) -> Self { let alias = ALIAS.parse(matches); + let commission_rate = COMMISSION_RATE.parse(matches); + let max_commission_rate_change = + MAX_COMMISSION_RATE_CHANGE.parse(matches); let net_address = NET_ADDRESS.parse(matches); let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); let key_scheme = SCHEME.parse(matches); @@ -3456,6 +3617,8 @@ pub mod args { net_address, unsafe_dont_encrypt, key_scheme, + commission_rate, + max_commission_rate_change, } } @@ -3463,9 +3626,18 @@ pub mod args { app.arg(ALIAS.def().about("The validator address alias.")) .arg(NET_ADDRESS.def().about( "Static {host:port} of your validator node's P2P address. \ - Anoma uses port `26656` for P2P connections by default, \ + Namada uses port `26656` for P2P connections by default, \ but you can configure a different value.", )) + .arg(COMMISSION_RATE.def().about( + "The commission rate charged by the validator for \ + delegation rewards. This is a required parameter.", + )) + .arg(MAX_COMMISSION_RATE_CHANGE.def().about( + "The maximum change per epoch in the commission rate \ + charged by the validator for delegation rewards. This is \ + a required parameter.", + )) .arg(UNSAFE_DONT_ENCRYPT.def().about( "UNSAFE: Do not encrypt the generated keypairs. 
Do not \ use this for keys used in a live network.", @@ -3478,45 +3650,46 @@ pub mod args { } } -pub fn anoma_cli() -> (cmds::Anoma, String) { - let app = anoma_app(); +pub fn namada_cli() -> (cmds::Namada, String) { + let app = namada_app(); let matches = app.get_matches(); let raw_sub_cmd = matches.subcommand().map(|(raw, _matches)| raw.to_string()); - let result = cmds::Anoma::parse(&matches); + let result = cmds::Namada::parse(&matches); match (result, raw_sub_cmd) { (Some(cmd), Some(raw_sub)) => return (cmd, raw_sub), _ => { - anoma_app().print_help().unwrap(); + namada_app().print_help().unwrap(); } } safe_exit(2); } -pub fn anoma_node_cli() -> Result<(cmds::AnomaNode, Context)> { - let app = anoma_node_app(); - cmds::AnomaNode::parse_or_print_help(app) +pub fn namada_node_cli() -> Result<(cmds::NamadaNode, Context)> { + let app = namada_node_app(); + cmds::NamadaNode::parse_or_print_help(app) } -pub enum AnomaClient { +#[allow(clippy::large_enum_variant)] +pub enum NamadaClient { WithoutContext(cmds::Utils, args::Global), - WithContext(Box<(cmds::AnomaClientWithContext, Context)>), + WithContext(Box<(cmds::NamadaClientWithContext, Context)>), } -pub fn anoma_client_cli() -> Result { - let app = anoma_client_app(); - let mut app = cmds::AnomaClient::add_sub(app); +pub fn namada_client_cli() -> Result { + let app = namada_client_app(); + let mut app = cmds::NamadaClient::add_sub(app); let matches = app.clone().get_matches(); match Cmd::parse(&matches) { Some(cmd) => { let global_args = args::Global::parse(&matches); match cmd { - cmds::AnomaClient::WithContext(sub_cmd) => { + cmds::NamadaClient::WithContext(sub_cmd) => { let context = Context::new(global_args)?; - Ok(AnomaClient::WithContext(Box::new((sub_cmd, context)))) + Ok(NamadaClient::WithContext(Box::new((sub_cmd, context)))) } - cmds::AnomaClient::WithoutContext(sub_cmd) => { - Ok(AnomaClient::WithoutContext(sub_cmd, global_args)) + cmds::NamadaClient::WithoutContext(sub_cmd) => { + 
Ok(NamadaClient::WithoutContext(sub_cmd, global_args)) } } } @@ -3527,52 +3700,52 @@ pub fn anoma_client_cli() -> Result { } } -pub fn anoma_wallet_cli() -> Result<(cmds::AnomaWallet, Context)> { - let app = anoma_wallet_app(); - cmds::AnomaWallet::parse_or_print_help(app) +pub fn namada_wallet_cli() -> Result<(cmds::NamadaWallet, Context)> { + let app = namada_wallet_app(); + cmds::NamadaWallet::parse_or_print_help(app) } -pub fn anoma_relayer_cli() -> Result<(cmds::EthBridgePool, Context)> { - let app = anoma_relayer_app(); +pub fn namada_relayer_cli() -> Result<(cmds::EthBridgePool, Context)> { + let app = namada_relayer_app(); cmds::EthBridgePool::parse_or_print_help(app) } -fn anoma_app() -> App { +fn namada_app() -> App { let app = App::new(APP_NAME) - .version(anoma_version()) - .about("Anoma command line interface.") + .version(namada_version()) + .about("Namada command line interface.") .setting(AppSettings::SubcommandRequiredElseHelp); - cmds::Anoma::add_sub(args::Global::def(app)) + cmds::Namada::add_sub(args::Global::def(app)) } -fn anoma_node_app() -> App { +fn namada_node_app() -> App { let app = App::new(APP_NAME) - .version(anoma_version()) - .about("Anoma node command line interface.") + .version(namada_version()) + .about("Namada node command line interface.") .setting(AppSettings::SubcommandRequiredElseHelp); - cmds::AnomaNode::add_sub(args::Global::def(app)) + cmds::NamadaNode::add_sub(args::Global::def(app)) } -fn anoma_client_app() -> App { +fn namada_client_app() -> App { let app = App::new(APP_NAME) - .version(anoma_version()) - .about("Anoma client command line interface.") + .version(namada_version()) + .about("Namada client command line interface.") .setting(AppSettings::SubcommandRequiredElseHelp); - cmds::AnomaClient::add_sub(args::Global::def(app)) + cmds::NamadaClient::add_sub(args::Global::def(app)) } -fn anoma_wallet_app() -> App { +fn namada_wallet_app() -> App { let app = App::new(APP_NAME) - .version(anoma_version()) - 
.about("Anoma wallet command line interface.") + .version(namada_version()) + .about("Namada wallet command line interface.") .setting(AppSettings::SubcommandRequiredElseHelp); - cmds::AnomaWallet::add_sub(args::Global::def(app)) + cmds::NamadaWallet::add_sub(args::Global::def(app)) } -fn anoma_relayer_app() -> App { +fn namada_relayer_app() -> App { let app = App::new(APP_NAME) - .version(anoma_version()) - .about("Anoma Ethereum bridge pool command line interface.") + .version(namada_version()) + .about("Namada Ethereum bridge pool command line interface.") .setting(AppSettings::SubcommandRequiredElseHelp); cmds::EthBridgePool::add_sub(args::Global::def(app)) } diff --git a/apps/src/lib/cli/context.rs b/apps/src/lib/cli/context.rs index ed09905b447..e61fda9dfc0 100644 --- a/apps/src/lib/cli/context.rs +++ b/apps/src/lib/cli/context.rs @@ -20,9 +20,9 @@ use crate::wallet::Wallet; use crate::wasm_loader; /// Env. var to set chain ID -const ENV_VAR_CHAIN_ID: &str = "ANOMA_CHAIN_ID"; +const ENV_VAR_CHAIN_ID: &str = "NAMADA_CHAIN_ID"; /// Env. 
var to set wasm directory -pub const ENV_VAR_WASM_DIR: &str = "ANOMA_WASM_DIR"; +pub const ENV_VAR_WASM_DIR: &str = "NAMADA_WASM_DIR"; /// A raw address (bech32m encoding) or an alias of an address that may be found /// in the wallet @@ -73,6 +73,8 @@ pub struct Context { pub config: Config, /// The context fr shielded operations pub shielded: ShieldedContext, + /// Native token's address + pub native_token: Address, } impl Context { @@ -88,14 +90,16 @@ impl Context { let chain_dir = global_args .base_dir - .join(&global_config.default_chain_id.as_str()); + .join(global_config.default_chain_id.as_str()); let genesis_file_path = global_args .base_dir .join(format!("{}.toml", global_config.default_chain_id.as_str())); - let wallet = Wallet::load_or_new_from_genesis( - &chain_dir, - genesis_config::open_genesis_config(&genesis_file_path)?, - ); + let genesis = genesis_config::read_genesis_config(&genesis_file_path); + let native_token = genesis.native_token; + let default_genesis = + genesis_config::open_genesis_config(genesis_file_path)?; + let wallet = + Wallet::load_or_new_from_genesis(&chain_dir, default_genesis); // If the WASM dir specified, put it in the config match global_args.wasm_dir.as_ref() { @@ -115,6 +119,7 @@ impl Context { global_config, config, shielded: ShieldedContext::new(chain_dir), + native_token, }) } diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index f0850b1d002..3fb4d8eea8e 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -25,11 +25,9 @@ use masp_primitives::zip32::ExtendedFullViewingKey; use namada::ledger::events::Event; use namada::ledger::governance::parameters::GovParams; use namada::ledger::governance::storage as gov_storage; -use namada::ledger::governance::utils::Votes; +use namada::ledger::native_vp::governance::utils::Votes; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; -use namada::ledger::pos::types::{ - Epoch as PosEpoch, VotingPower, 
WeightedValidator, -}; +use namada::ledger::pos::types::{decimal_mult_u64, WeightedValidator}; use namada::ledger::pos::{ self, is_validator_slashes_key, BondId, Bonds, PosParams, Slash, Unbonds, }; @@ -53,6 +51,7 @@ use namada::types::transaction::{ WrapperTx, }; use namada::types::{address, storage, token}; +use rust_decimal::Decimal; use tokio::time::{Duration, Instant}; use crate::cli::{self, args, Context}; @@ -130,7 +129,7 @@ pub async fn query_epoch(args: args::Query) -> Epoch { /// Query the last committed block pub async fn query_block( args: args::Query, -) -> tendermint_rpc::endpoint::block::Response { +) -> crate::facade::tendermint_rpc::endpoint::block::Response { let client = HttpClient::new(args.ledger_address).unwrap(); let response = client.latest_block().await.unwrap(); println!( @@ -776,6 +775,17 @@ pub async fn query_proposal(_ctx: Context, args: args::QueryProposal) { println!("{:4}Status: pending", ""); } else if start_epoch <= current_epoch && current_epoch <= end_epoch { + let votes = get_proposal_votes(client, start_epoch, id).await; + let partial_proposal_result = + compute_tally(client, start_epoch, votes).await; + println!( + "{:4}Yay votes: {}", + "", partial_proposal_result.total_yay_power + ); + println!( + "{:4}Nay votes: {}", + "", partial_proposal_result.total_nay_power + ); println!("{:4}Status: on-going", ""); } else { let votes = get_proposal_votes(client, start_epoch, id).await; @@ -1228,7 +1238,7 @@ pub async fn query_proposal_result( cli::safe_exit(1) } - let file = File::open(&path.join("proposal")) + let file = File::open(path.join("proposal")) .expect("Proposal file must exist."); let proposal: OfflineProposal = serde_json::from_reader(file).expect( @@ -1287,7 +1297,7 @@ pub async fn query_protocol_parameters( println!("Governance Parameters\n {:4}", gov_parameters); println!("Protocol parameters"); - let key = param_storage::get_epoch_storage_key(); + let key = param_storage::get_epoch_duration_storage_key(); let 
epoch_duration = query_storage_value::(&client, &key) .await .expect("Parameter should be definied."); @@ -1332,12 +1342,12 @@ pub async fn query_protocol_parameters( "", pos_params.block_vote_reward ); println!( - "{:4}Duplicate vote slash rate: {}", - "", pos_params.duplicate_vote_slash_rate + "{:4}Duplicate vote minimum slash rate: {}", + "", pos_params.duplicate_vote_min_slash_rate ); println!( - "{:4}Light client attack slash rate: {}", - "", pos_params.light_client_attack_slash_rate + "{:4}Light client attack minimum slash rate: {}", + "", pos_params.light_client_attack_min_slash_rate ); println!( "{:4}Max. validator slots: {}", @@ -1345,7 +1355,7 @@ pub async fn query_protocol_parameters( ); println!("{:4}Pipeline length: {}", "", pos_params.pipeline_len); println!("{:4}Unbonding length: {}", "", pos_params.unbonding_len); - println!("{:4}Votes per token: {}", "", pos_params.votes_per_token); + println!("{:4}Votes per token: {}", "", pos_params.tm_votes_per_token); } /// Query PoS bond(s) @@ -1695,8 +1705,8 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { } } -/// Query PoS voting power -pub async fn query_voting_power(ctx: Context, args: args::QueryVotingPower) { +/// Query PoS bonded stake +pub async fn query_bonded_stake(ctx: Context, args: args::QueryBondedStake) { let epoch = match args.epoch { Some(epoch) => epoch, None => query_epoch(args.query.clone()).await, @@ -1712,26 +1722,26 @@ pub async fn query_voting_power(ctx: Context, args: args::QueryVotingPower) { let validator_set = validator_sets .get(epoch) .expect("Validator set should be always set in the current epoch"); + match args.validator { Some(validator) => { let validator = ctx.get(&validator); - // Find voting power for the given validator - let voting_power_key = pos::validator_voting_power_key(&validator); - let voting_powers = - query_storage_value::( - &client, - &voting_power_key, - ) - .await; - match voting_powers.and_then(|data| data.get(epoch)) { - 
Some(voting_power_delta) => { - let voting_power: VotingPower = - voting_power_delta.try_into().expect( - "The sum voting power deltas shouldn't be negative", - ); + // Find bonded stake for the given validator + let validator_deltas_key = pos::validator_deltas_key(&validator); + let validator_deltas = query_storage_value::( + &client, + &validator_deltas_key, + ) + .await; + match validator_deltas.and_then(|data| data.get(epoch)) { + Some(val_stake) => { + let bonded_stake: u64 = val_stake.try_into().expect( + "The sum of the bonded stake deltas shouldn't be \ + negative", + ); let weighted = WeightedValidator { address: validator.clone(), - voting_power, + bonded_stake, }; let is_active = validator_set.active.contains(&weighted); if !is_active { @@ -1740,14 +1750,14 @@ pub async fn query_voting_power(ctx: Context, args: args::QueryVotingPower) { ); } println!( - "Validator {} is {}, voting power: {}", + "Validator {} is {}, bonded stake: {}", validator.encode(), if is_active { "active" } else { "inactive" }, - voting_power + bonded_stake, ) } None => { - println!("No voting power found for {}", validator.encode()) + println!("No bonded stake found for {}", validator.encode()) } } } @@ -1762,7 +1772,7 @@ pub async fn query_voting_power(ctx: Context, args: args::QueryVotingPower) { w, " {}: {}", active.address.encode(), - active.voting_power + active.bonded_stake ) .unwrap(); } @@ -1773,24 +1783,82 @@ pub async fn query_voting_power(ctx: Context, args: args::QueryVotingPower) { w, " {}: {}", inactive.address.encode(), - inactive.voting_power + inactive.bonded_stake ) .unwrap(); } } } } - let total_voting_power_key = pos::total_voting_power_key(); - let total_voting_powers = query_storage_value::( - &client, - &total_voting_power_key, - ) - .await - .expect("Total voting power should always be set"); - let total_voting_power = total_voting_powers + let total_deltas_key = pos::total_deltas_key(); + let total_deltas = + query_storage_value::(&client, 
&total_deltas_key) + .await + .expect("Total bonded stake should always be set"); + let total_bonded_stake = total_deltas .get(epoch) - .expect("Total voting power should be always set in the current epoch"); - println!("Total voting power: {}", total_voting_power); + .expect("Total bonded stake should be always set in the current epoch"); + let total_bonded_stake: u64 = total_bonded_stake + .try_into() + .expect("total_bonded_stake should be a positive value"); + + println!("Total bonded stake: {}", total_bonded_stake); +} + +/// Query PoS validator's commission rate +pub async fn query_commission_rate( + ctx: Context, + args: args::QueryCommissionRate, +) { + let epoch = match args.epoch { + Some(epoch) => epoch, + None => query_epoch(args.query.clone()).await, + }; + let client = HttpClient::new(args.query.ledger_address.clone()).unwrap(); + let validator = ctx.get(&args.validator); + let is_validator = + is_validator(&validator, args.query.ledger_address).await; + + if is_validator { + let validator_commission_key = + pos::validator_commission_rate_key(&validator); + let validator_max_commission_change_key = + pos::validator_max_commission_rate_change_key(&validator); + let commission_rates = query_storage_value::( + &client, + &validator_commission_key, + ) + .await; + let max_rate_change = query_storage_value::( + &client, + &validator_max_commission_change_key, + ) + .await; + let max_rate_change = + max_rate_change.expect("No max rate change found"); + let commission_rates = + commission_rates.expect("No commission rate found "); + match commission_rates.get(epoch) { + Some(rate) => { + println!( + "Validator {} commission rate: {}, max change per epoch: \ + {}", + validator.encode(), + *rate, + max_rate_change, + ) + } + None => { + println!( + "No commission rate found for {} in epoch {}", + validator.encode(), + epoch + ) + } + } + } else { + println!("Cannot find validator with address {}", validator); + } } /// Query PoS slashes @@ -1890,10 +1958,7 @@ 
pub async fn is_validator( ledger_address: TendermintAddress, ) -> bool { let client = HttpClient::new(ledger_address).unwrap(); - let key = pos::validator_state_key(address); - let state: Option = - query_storage_value(&client, &key).await; - state.is_some() + unwrap_client_response(RPC.vp().pos().is_validator(&client, address).await) } /// Check if a given address is a known delegator @@ -1945,8 +2010,8 @@ pub async fn known_address( fn apply_slashes( slashes: &[Slash], mut delta: token::Amount, - epoch_start: PosEpoch, - withdraw_epoch: Option, + epoch_start: Epoch, + withdraw_epoch: Option, mut w: Option<&mut std::io::StdoutLock>, ) -> token::Amount { let mut slashed = token::Amount::default(); @@ -1963,7 +2028,8 @@ fn apply_slashes( .unwrap(); } let raw_delta: u64 = delta.into(); - let current_slashed = token::Amount::from(slash.rate * raw_delta); + let current_slashed = + token::Amount::from(decimal_mult_u64(slash.rate, raw_delta)); slashed += current_slashed; delta -= current_slashed; } @@ -1997,8 +2063,7 @@ fn process_bonds_query( .unwrap(); delta = apply_slashes(slashes, delta, *epoch_start, None, Some(w)); current_total += delta; - let epoch_start: Epoch = (*epoch_start).into(); - if epoch >= &epoch_start { + if epoch >= epoch_start { total_active += delta; } } @@ -2053,8 +2118,7 @@ fn process_unbonds_query( Some(w), ); current_total += delta; - let epoch_end: Epoch = (*epoch_end).into(); - if epoch > &epoch_end { + if epoch > epoch_end { withdrawable += delta; } } @@ -2352,11 +2416,11 @@ pub async fn query_tx_response( // applied to the blockchain let query_event_opt = response_block_results.end_block_events.and_then(|events| { - (&events) + events .iter() .find(|event| { event.type_str == tx_query.event_type() - && (&event.attributes).iter().any(|tag| { + && event.attributes.iter().any(|tag| { tag.key.as_ref() == "hash" && tag.value.as_ref() == tx_query.tx_hash() }) @@ -2371,8 +2435,8 @@ pub async fn query_tx_response( ) })?; // Reformat the event 
attributes so as to ease value extraction - let event_map: std::collections::HashMap<&str, &str> = (&query_event - .attributes) + let event_map: std::collections::HashMap<&str, &str> = query_event + .attributes .iter() .map(|tag| (tag.key.as_ref(), tag.value.as_ref())) .collect(); @@ -2461,8 +2525,10 @@ pub async fn get_proposal_votes( .expect("Vote key should contain the voting address.") .clone(); if vote.is_yay() && validators.contains(&voter_address) { - let amount = - get_validator_stake(client, epoch, &voter_address).await; + let amount: VotePower = + get_validator_stake(client, epoch, &voter_address) + .await + .into(); yay_validators.insert(voter_address, amount); } else if !validators.contains(&voter_address) { let validator_address = @@ -2536,12 +2602,13 @@ pub async fn get_proposal_offline_votes( if proposal_vote.vote.is_yay() && validators.contains(&proposal_vote.address) { - let amount = get_validator_stake( + let amount: VotePower = get_validator_stake( client, proposal.tally_epoch, &proposal_vote.address, ) - .await; + .await + .into(); yay_validators.insert(proposal_vote.address, amount); } else if is_delegator_at( client, @@ -2569,11 +2636,8 @@ pub async fn get_proposal_offline_votes( .await .unwrap_or_default(); let mut delegated_amount: token::Amount = 0.into(); - let epoch = namada::ledger::pos::types::Epoch::from( - proposal.tally_epoch.0, - ); let bond = epoched_bonds - .get(epoch) + .get(proposal.tally_epoch) .expect("Delegation bond should be defined."); let mut to_deduct = bond.neg_deltas; for (start_epoch, &(mut delta)) in @@ -2639,9 +2703,8 @@ pub async fn compute_tally( epoch: Epoch, votes: Votes, ) -> ProposalResult { - let validators = get_all_validators(client, epoch).await; - let total_stacked_tokens = - get_total_staked_tokes(client, epoch, &validators).await; + let total_staked_tokens: VotePower = + get_total_staked_tokens(client, epoch).await.into(); let Votes { yay_validators, @@ -2649,16 +2712,16 @@ pub async fn compute_tally( 
nay_delegators, } = votes; - let mut total_yay_stacked_tokens = VotePower::from(0_u64); + let mut total_yay_staked_tokens = VotePower::from(0_u64); for (_, amount) in yay_validators.clone().into_iter() { - total_yay_stacked_tokens += amount; + total_yay_staked_tokens += amount; } // YAY: Add delegator amount whose validator didn't vote / voted nay for (_, vote_map) in yay_delegators.iter() { for (validator_address, vote_power) in vote_map.iter() { if !yay_validators.contains_key(validator_address) { - total_yay_stacked_tokens += vote_power; + total_yay_staked_tokens += vote_power; } } } @@ -2667,23 +2730,23 @@ pub async fn compute_tally( for (_, vote_map) in nay_delegators.iter() { for (validator_address, vote_power) in vote_map.iter() { if yay_validators.contains_key(validator_address) { - total_yay_stacked_tokens -= vote_power; + total_yay_staked_tokens -= vote_power; } } } - if total_yay_stacked_tokens >= (total_stacked_tokens / 3) * 2 { + if total_yay_staked_tokens >= (total_staked_tokens / 3) * 2 { ProposalResult { result: TallyResult::Passed, - total_voting_power: total_stacked_tokens, - total_yay_power: total_yay_stacked_tokens, + total_voting_power: total_staked_tokens, + total_yay_power: total_yay_staked_tokens, total_nay_power: 0, } } else { ProposalResult { result: TallyResult::Rejected, - total_voting_power: total_stacked_tokens, - total_yay_power: total_yay_stacked_tokens, + total_voting_power: total_staked_tokens, + total_yay_power: total_yay_staked_tokens, total_nay_power: 0, } } @@ -2730,8 +2793,7 @@ pub async fn get_bond_amount_at( None, None, ); - let epoch_start: Epoch = (*epoch_start).into(); - if epoch >= epoch_start { + if epoch >= *epoch_start { delegated_amount += delta; } } @@ -2745,69 +2807,42 @@ pub async fn get_bond_amount_at( pub async fn get_all_validators( client: &HttpClient, epoch: Epoch, -) -> Vec
{ - let validator_set_key = pos::validator_set_key(); - let validator_sets = - query_storage_value::(client, &validator_set_key) - .await - .expect("Validator set should always be set"); - let validator_set = validator_sets - .get(epoch) - .expect("Validator set should be always set in the current epoch"); - let all_validators = validator_set.active.union(&validator_set.inactive); - all_validators - .map(|validator| validator.address.clone()) - .collect() +) -> HashSet
{ + unwrap_client_response( + RPC.vp() + .pos() + .validator_addresses(client, &Some(epoch)) + .await, + ) } -pub async fn get_total_staked_tokes( +pub async fn get_total_staked_tokens( client: &HttpClient, epoch: Epoch, - validators: &[Address], -) -> VotePower { - let mut total = VotePower::from(0_u64); - - for validator in validators { - total += get_validator_stake(client, epoch, validator).await; - } - total +) -> token::Amount { + unwrap_client_response( + RPC.vp().pos().total_stake(client, &Some(epoch)).await, + ) } async fn get_validator_stake( client: &HttpClient, epoch: Epoch, validator: &Address, -) -> VotePower { - let total_voting_power_key = pos::validator_total_deltas_key(validator); - let total_voting_power = query_storage_value::( - client, - &total_voting_power_key, +) -> token::Amount { + unwrap_client_response( + RPC.vp() + .pos() + .validator_stake(client, validator, &Some(epoch)) + .await, ) - .await - .expect("Total deltas should be defined"); - let epoched_total_voting_power = total_voting_power.get(epoch); - - VotePower::try_from(epoched_total_voting_power.unwrap_or_default()) - .unwrap_or_default() } pub async fn get_delegators_delegation( client: &HttpClient, address: &Address, - _epoch: Epoch, -) -> Vec
{ - let key = pos::bonds_for_source_prefix(address); - let bonds_iter = query_storage_prefix::(client, &key).await; - - let mut delegation_addresses: Vec
= Vec::new(); - if let Some(bonds) = bonds_iter { - for (key, _epoched_amount) in bonds { - let validator_address = pos::get_validator_address_from_bond(&key) - .expect("Delegation key should contain validator address."); - delegation_addresses.push(validator_address); - } - } - delegation_addresses +) -> HashSet
{ + unwrap_client_response(RPC.vp().pos().delegations(client, address).await) } pub async fn get_governance_parameters(client: &HttpClient) -> GovParams { diff --git a/apps/src/lib/client/signing.rs b/apps/src/lib/client/signing.rs index 681df3f8b6a..ed7ab484a90 100644 --- a/apps/src/lib/client/signing.rs +++ b/apps/src/lib/client/signing.rs @@ -109,9 +109,20 @@ pub async fn tx_signer( args.ledger_address.clone(), ) .await; + // Check if the signer is implicit account that needs to reveal its + // PK first + if matches!(signer, Address::Implicit(_)) { + let pk: common::PublicKey = signing_key.ref_to(); + super::tx::reveal_pk_if_needed(ctx, &pk, args).await; + } + signing_key + } + TxSigningKey::SecretKey(signing_key) => { + // Check if the signing key needs to reveal its PK first + let pk: common::PublicKey = signing_key.ref_to(); + super::tx::reveal_pk_if_needed(ctx, &pk, args).await; signing_key } - TxSigningKey::SecretKey(signing_key) => signing_key, TxSigningKey::None => { panic!( "All transactions must be signed; please either specify the \ diff --git a/apps/src/lib/client/tendermint_rpc_types.rs b/apps/src/lib/client/tendermint_rpc_types.rs index 66fe1912dff..537cca243fa 100644 --- a/apps/src/lib/client/tendermint_rpc_types.rs +++ b/apps/src/lib/client/tendermint_rpc_types.rs @@ -72,10 +72,7 @@ impl TryFrom for TxResponse { .map(String::as_str) // TODO: fix finalize block, to return initialized accounts, // even when we reject a tx? 
- .or(Some("[]")) - // NOTE: at this point we only have `Some(vec)`, not `None` - .ok_or_else(|| unreachable!()) - .and_then(|initialized_accounts| { + .map_or(Ok(vec![]), |initialized_accounts| { serde_json::from_str(initialized_accounts) .map_err(|err| format!("JSON decode error: {err}")) })?; diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 82b21c2b0fc..9334eb6dd7b 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -7,7 +7,6 @@ use std::fs::{File, OpenOptions}; use std::io::{Read, Write}; use std::ops::Deref; use std::path::PathBuf; -use std::time::Duration; use async_std::io::prelude::WriteExt; use async_std::io::{self}; @@ -39,9 +38,9 @@ use namada::ibc::Height as IbcHeight; use namada::ibc_proto::cosmos::base::v1beta1::Coin; use namada::ledger::governance::storage as gov_storage; use namada::ledger::masp; -use namada::ledger::pos::{BondId, Bonds, Unbonds}; +use namada::ledger::pos::{BondId, Bonds, CommissionRates, Unbonds}; use namada::proto::Tx; -use namada::types::address::{masp, masp_tx_key, nam, Address}; +use namada::types::address::{masp, masp_tx_key, Address}; use namada::types::governance::{ OfflineProposal, OfflineVote, Proposal, ProposalVote, }; @@ -51,6 +50,7 @@ use namada::types::storage::{ self, BlockHeight, Epoch, Key, KeySeg, TxIndex, RESERVED_ADDRESS_PREFIX, }; use namada::types::time::DateTimeUtc; +use namada::types::token; use namada::types::token::{ Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, }; @@ -58,11 +58,11 @@ use namada::types::transaction::governance::{ InitProposalData, VoteProposalData, }; use namada::types::transaction::{pos, InitAccount, InitValidator, UpdateVp}; -use namada::types::{address, token}; use namada::{ledger, vm}; use rand_core::{CryptoRng, OsRng, RngCore}; +use rust_decimal::Decimal; use sha2::Digest; -use tokio::time::Instant; +use tokio::time::{Duration, Instant}; use super::rpc; use super::types::ShieldedTransferContext; @@ -82,6 +82,7 @@ const 
TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; const TX_INIT_VALIDATOR_WASM: &str = "tx_init_validator.wasm"; const TX_INIT_PROPOSAL: &str = "tx_init_proposal.wasm"; const TX_VOTE_PROPOSAL: &str = "tx_vote_proposal.wasm"; +const TX_REVEAL_PK: &str = "tx_reveal_pk.wasm"; const TX_UPDATE_VP_WASM: &str = "tx_update_vp.wasm"; const TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; const TX_IBC_WASM: &str = "tx_ibc.wasm"; @@ -89,6 +90,7 @@ const VP_USER_WASM: &str = "vp_user.wasm"; const TX_BOND_WASM: &str = "tx_bond.wasm"; const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; +const TX_CHANGE_COMMISSION_WASM: &str = "tx_change_validator_commission.wasm"; /// Timeout for requests to the `/accepted` and `/applied` /// ABCI query endpoints. @@ -202,10 +204,10 @@ pub async fn submit_init_validator( consensus_key, eth_cold_key, eth_hot_key, - rewards_account_key, protocol_key, + commission_rate, + max_commission_rate_change, validator_vp_code_path, - rewards_vp_code_path, unsafe_dont_encrypt, }: args::TxInitValidator, ) { @@ -217,7 +219,6 @@ pub async fn submit_init_validator( let validator_key_alias = format!("{}-key", alias); let consensus_key_alias = format!("{}-consensus-key", alias); - let rewards_key_alias = format!("{}-rewards-key", alias); let eth_hot_key_alias = format!("{}-eth-hot-key", alias); let eth_cold_key_alias = format!("{}-eth-cold-key", alias); let account_key = ctx.get_opt_cached(&account_key).unwrap_or_else(|| { @@ -294,19 +295,6 @@ pub async fn submit_init_validator( ) .1 }); - - let rewards_account_key = - ctx.get_opt_cached(&rewards_account_key).unwrap_or_else(|| { - println!("Generating staking reward account key..."); - ctx.wallet - .gen_key( - scheme, - Some(rewards_key_alias.clone()), - unsafe_dont_encrypt, - ) - .1 - .ref_to() - }); let protocol_key = ctx.get_opt_cached(&protocol_key); if protocol_key.is_none() { @@ -330,24 +318,32 @@ pub async fn submit_init_validator( let validator_vp_code = 
validator_vp_code_path .map(|path| ctx.read_wasm(path)) .unwrap_or_else(|| ctx.read_wasm(VP_USER_WASM)); - // Validate the validator VP code - if let Err(err) = vm::validate_untrusted_wasm(&validator_vp_code) { + + // Validate the commission rate data + if commission_rate > Decimal::ONE || commission_rate < Decimal::ZERO { eprintln!( - "Validator validity predicate code validation failed with {}", - err + "The validator commission rate must not exceed 1.0 or 100%, and \ + it must be 0 or positive" ); if !tx_args.force { safe_exit(1) } } - let rewards_vp_code = rewards_vp_code_path - .map(|path| ctx.read_wasm(path)) - .unwrap_or_else(|| ctx.read_wasm(VP_USER_WASM)); - // Validate the rewards VP code - if let Err(err) = vm::validate_untrusted_wasm(&rewards_vp_code) { + if max_commission_rate_change > Decimal::ONE + || max_commission_rate_change < Decimal::ZERO + { + eprintln!( + "The validator maximum change in commission rate per epoch must \ + not exceed 1.0 or 100%" + ); + if !tx_args.force { + safe_exit(1) + } + } + // Validate the validator VP code + if let Err(err) = vm::validate_untrusted_wasm(&validator_vp_code) { eprintln!( - "Staking reward account validity predicate code validation failed \ - with {}", + "Validator validity predicate code validation failed with {}", err ); if !tx_args.force { @@ -367,11 +363,11 @@ pub async fn submit_init_validator( ð_hot_key.ref_to(), ) .unwrap(), - rewards_account_key, protocol_key, dkg_key, + commission_rate, + max_commission_rate_change, validator_vp_code, - rewards_vp_code, }; let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); let tx = Tx::new(tx_code, Some(data)); @@ -379,21 +375,10 @@ pub async fn submit_init_validator( process_tx(ctx, &tx_args, tx, TxSigningKey::WalletAddress(source)) .await; if !tx_args.dry_run { - let (validator_address_alias, validator_address, rewards_address_alias) = + let (validator_address_alias, validator_address) = match &initialized_accounts[..] 
{ - // There should be 2 accounts, one for the validator itself, one - // for its staking reward address. - [account_1, account_2] => { - // We need to find out which address is which - let (validator_address, rewards_address) = - if rpc::is_validator(account_1, tx_args.ledger_address) - .await - { - (account_1, account_2) - } else { - (account_2, account_1) - }; - + // There should be 1 account for the validator itself + [validator_address] => { let validator_address_alias = match tx_args .initialized_account_alias { @@ -428,23 +413,7 @@ pub async fn submit_init_validator( validator_address.encode() ); } - let rewards_address_alias = - format!("{}-rewards", validator_address_alias); - if let Some(new_alias) = ctx.wallet.add_address( - rewards_address_alias.clone(), - rewards_address.clone(), - ) { - println!( - "Added alias {} for address {}.", - new_alias, - rewards_address.encode() - ); - } - ( - validator_address_alias, - validator_address.clone(), - rewards_address_alias, - ) + (validator_address_alias, validator_address.clone()) } _ => { eprintln!("Expected two accounts to be created"); @@ -465,10 +434,8 @@ pub async fn submit_init_validator( "The validator's addresses and keys were stored in the wallet:" ); println!(" Validator address \"{}\"", validator_address_alias); - println!(" Staking reward address \"{}\"", rewards_address_alias); println!(" Validator account key \"{}\"", validator_key_alias); println!(" Consensus key \"{}\"", consensus_key_alias); - println!(" Staking reward key \"{}\"", rewards_key_alias); println!( "The ledger node has been setup to use this validator's address \ and consensus key." 
@@ -1362,7 +1329,7 @@ fn make_asset_type(epoch: Epoch, token: &Address) -> AssetType { AssetType::new(token_bytes.as_ref()).expect("unable to create asset type") } -/// Convert Anoma amount and token type to MASP equivalents +/// Convert Namada amount and token type to MASP equivalents fn convert_amount( epoch: Epoch, token: &Address, @@ -1650,7 +1617,11 @@ pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { let (default_signer, amount, token) = if source == masp_addr && target == masp_addr { // TODO Refactor me, we shouldn't rely on any specific token here. - (TxSigningKey::SecretKey(masp_tx_key()), 0.into(), nam()) + ( + TxSigningKey::SecretKey(masp_tx_key()), + 0.into(), + ctx.native_token.clone(), + ) } else if source == masp_addr { ( TxSigningKey::SecretKey(masp_tx_key()), @@ -1953,9 +1924,13 @@ pub async fn submit_init_proposal(mut ctx: Context, args: args::InitProposal) { safe_exit(1) }; - let balance = rpc::get_token_balance(&client, &nam(), &proposal.author) - .await - .unwrap_or_default(); + let balance = rpc::get_token_balance( + &client, + &ctx.native_token, + &proposal.author, + ) + .await + .unwrap_or_default(); if balance < token::Amount::from(governance_parameters.min_proposal_fund) { @@ -2071,12 +2046,9 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { safe_exit(1) } } - let mut delegation_addresses = rpc::get_delegators_delegation( - &client, - &voter_address, - epoch, - ) - .await; + let mut delegations = + rpc::get_delegators_delegation(&client, &voter_address) + .await; // Optimize by quering if a vote from a validator // is equal to ours. 
If so, we can avoid voting, but ONLY if we @@ -2093,22 +2065,20 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { ) .await { - delegation_addresses = filter_delegations( + delegations = filter_delegations( &client, - delegation_addresses, + delegations, proposal_id, &args.vote, ) .await; } - println!("{:?}", delegation_addresses); - let tx_data = VoteProposalData { id: proposal_id, vote: args.vote, voter: voter_address, - delegations: delegation_addresses, + delegations: delegations.into_iter().collect(), }; let data = tx_data @@ -2138,6 +2108,114 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { } } +pub async fn submit_reveal_pk(mut ctx: Context, args: args::RevealPk) { + let args::RevealPk { + tx: args, + public_key, + } = args; + let public_key = ctx.get_cached(&public_key); + if !reveal_pk_if_needed(&mut ctx, &public_key, &args).await { + let addr: Address = (&public_key).into(); + println!("PK for {addr} is already revealed, nothing to do."); + } +} + +pub async fn reveal_pk_if_needed( + ctx: &mut Context, + public_key: &common::PublicKey, + args: &args::Tx, +) -> bool { + let addr: Address = public_key.into(); + // Check if PK revealed + if args.force || !has_revealed_pk(&addr, args.ledger_address.clone()).await + { + // If not, submit it + submit_reveal_pk_aux(ctx, public_key, args).await; + true + } else { + false + } +} + +pub async fn has_revealed_pk( + addr: &Address, + ledger_address: TendermintAddress, +) -> bool { + rpc::get_public_key(addr, ledger_address).await.is_some() +} + +pub async fn submit_reveal_pk_aux( + ctx: &mut Context, + public_key: &common::PublicKey, + args: &args::Tx, +) { + let addr: Address = public_key.into(); + println!("Submitting a tx to reveal the public key for address {addr}..."); + let tx_data = public_key + .try_to_vec() + .expect("Encoding a public key shouldn't fail"); + let tx_code = ctx.read_wasm(TX_REVEAL_PK); + let tx = Tx::new(tx_code, 
Some(tx_data)); + + // submit_tx without signing the inner tx + let keypair = if let Some(signing_key) = &args.signing_key { + ctx.get_cached(signing_key) + } else if let Some(signer) = args.signer.as_ref() { + let signer = ctx.get(signer); + find_keypair(&mut ctx.wallet, &signer, args.ledger_address.clone()) + .await + } else { + find_keypair(&mut ctx.wallet, &addr, args.ledger_address.clone()).await + }; + let epoch = rpc::query_epoch(args::Query { + ledger_address: args.ledger_address.clone(), + }) + .await; + let to_broadcast = if args.dry_run { + TxBroadcastData::DryRun(tx) + } else { + super::signing::sign_wrapper(ctx, args, epoch, tx, &keypair).await + }; + + if args.dry_run { + if let TxBroadcastData::DryRun(tx) = to_broadcast { + rpc::dry_run_tx(&args.ledger_address, tx.to_bytes()).await; + } else { + panic!( + "Expected a dry-run transaction, received a wrapper \ + transaction instead" + ); + } + } else { + // Either broadcast or submit transaction and collect result into + // sum type + let result = if args.broadcast_only { + Left(broadcast_tx(args.ledger_address.clone(), &to_broadcast).await) + } else { + Right(submit_tx(args.ledger_address.clone(), to_broadcast).await) + }; + // Return result based on executed operation, otherwise deal with + // the encountered errors uniformly + match result { + Right(Err(err)) => { + eprintln!( + "Encountered error while broadcasting transaction: {}", + err + ); + safe_exit(1) + } + Left(Err(err)) => { + eprintln!( + "Encountered error while broadcasting transaction: {}", + err + ); + safe_exit(1) + } + _ => {} + } + } +} + /// Check if current epoch is in the last third of the voting period of the /// proposal. This ensures that it is safe to optimize the vote writing to /// storage. 
@@ -2157,7 +2235,7 @@ async fn is_safe_voting_window( match proposal_end_epoch { Some(proposal_end_epoch) => { - !namada::ledger::governance::vp::is_valid_validator_voting_period( + !namada::ledger::native_vp::governance::utils::is_valid_validator_voting_period( current_epoch, proposal_start_epoch, proposal_end_epoch, @@ -2174,33 +2252,37 @@ async fn is_safe_voting_window( /// vote) async fn filter_delegations( client: &HttpClient, - mut delegation_addresses: Vec
, + delegations: HashSet
, proposal_id: u64, delegator_vote: &ProposalVote, -) -> Vec
{ - let mut remove_indexes: Vec = vec![]; - - for (index, validator_address) in delegation_addresses.iter().enumerate() { - let vote_key = gov_storage::get_vote_proposal_key( - proposal_id, - validator_address.to_owned(), - validator_address.to_owned(), - ); - - if let Some(validator_vote) = - rpc::query_storage_value::(client, &vote_key).await - { - if &validator_vote == delegator_vote { - remove_indexes.push(index); - } - } - } - - for index in remove_indexes { - delegation_addresses.swap_remove(index); - } +) -> HashSet
{ + // Filter delegations by their validator's vote concurrently + let delegations = futures::future::join_all( + delegations + .into_iter() + // we cannot use `filter/filter_map` directly because we want to + // return a future + .map(|validator_address| async { + let vote_key = gov_storage::get_vote_proposal_key( + proposal_id, + validator_address.to_owned(), + validator_address.to_owned(), + ); - delegation_addresses + if let Some(validator_vote) = + rpc::query_storage_value::(client, &vote_key) + .await + { + if &validator_vote == delegator_vote { + return None; + } + } + Some(validator_address) + }), + ) + .await; + // Take out the `None`s + delegations.into_iter().flatten().collect() } pub async fn submit_bond(ctx: Context, args: args::Bond) { @@ -2232,7 +2314,7 @@ pub async fn submit_bond(ctx: Context, args: args::Bond) { // Check bond's source (source for delegation or validator for self-bonds) // balance let bond_source = source.as_ref().unwrap_or(&validator); - let balance_key = token::balance_key(&address::nam(), bond_source); + let balance_key = token::balance_key(&ctx.native_token, bond_source); let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); match rpc::query_storage_value::(&client, &balance_key).await { @@ -2421,6 +2503,88 @@ pub async fn submit_withdraw(ctx: Context, args: args::Withdraw) { .await; } +pub async fn submit_validator_commission_change( + ctx: Context, + args: args::TxCommissionRateChange, +) { + let epoch = rpc::query_epoch(args::Query { + ledger_address: args.tx.ledger_address.clone(), + }) + .await; + + let tx_code = ctx.read_wasm(TX_CHANGE_COMMISSION_WASM); + let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); + + let validator = ctx.get(&args.validator); + if rpc::is_validator(&validator, args.tx.ledger_address.clone()).await { + if args.rate < Decimal::ZERO || args.rate > Decimal::ONE { + eprintln!("Invalid new commission rate, received {}", args.rate); + if !args.tx.force { + safe_exit(1) 
+ } + } + + let commission_rate_key = + ledger::pos::validator_commission_rate_key(&validator); + let max_commission_rate_change_key = + ledger::pos::validator_max_commission_rate_change_key(&validator); + let commission_rates = rpc::query_storage_value::( + &client, + &commission_rate_key, + ) + .await; + let max_change = rpc::query_storage_value::( + &client, + &max_commission_rate_change_key, + ) + .await; + + match (commission_rates, max_change) { + (Some(rates), Some(max_change)) => { + // Assuming that pipeline length = 2 + let rate_next_epoch = rates.get(epoch.next()).unwrap(); + if (args.rate - rate_next_epoch).abs() > max_change { + eprintln!( + "New rate is too large of a change with respect to \ + the predecessor epoch in which the rate will take \ + effect." + ); + if !args.tx.force { + safe_exit(1) + } + } + } + _ => { + eprintln!("Error retrieving from storage"); + if !args.tx.force { + safe_exit(1) + } + } + } + } else { + eprintln!("The given address {validator} is not a validator."); + if !args.tx.force { + safe_exit(1) + } + } + + let data = pos::CommissionChange { + validator: ctx.get(&args.validator), + new_rate: args.rate, + }; + let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); + + let tx = Tx::new(tx_code, Some(data)); + let default_signer = args.validator; + process_tx( + ctx, + &args.tx, + tx, + TxSigningKey::WalletAddress(default_signer), + ) + .await; +} + /// Submit transaction and wait for result. Returns a list of addresses /// initialized in the transaction if any. In dry run, this is always empty. 
pub async fn process_tx( diff --git a/apps/src/lib/client/types.rs b/apps/src/lib/client/types.rs index 1f94838d252..5a26244474f 100644 --- a/apps/src/lib/client/types.rs +++ b/apps/src/lib/client/types.rs @@ -8,11 +8,11 @@ use namada::types::masp::{TransferSource, TransferTarget}; use namada::types::storage::Epoch; use namada::types::transaction::GasLimit; use namada::types::{key, token}; -use tendermint_config::net::Address as TendermintAddress; use super::rpc; use crate::cli::{args, Context}; use crate::client::tx::Conversions; +use crate::facade::tendermint_config::net::Address as TendermintAddress; #[derive(Clone, Debug)] pub struct ParsedTxArgs { diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs index 5be3bb73fcb..99e7d4b7d48 100644 --- a/apps/src/lib/client/utils.rs +++ b/apps/src/lib/client/utils.rs @@ -16,6 +16,7 @@ use namada::types::key::*; use prost::bytes::Bytes; use rand::prelude::ThreadRng; use rand::thread_rng; +use rust_decimal::Decimal; use serde_json::json; use sha2::{Digest, Sha256}; @@ -34,15 +35,16 @@ use crate::wasm_loader; pub const NET_ACCOUNTS_DIR: &str = "setup"; pub const NET_OTHER_ACCOUNTS_DIR: &str = "other"; -/// Github URL prefix of released Anoma network configs -pub const ENV_VAR_NETWORK_CONFIGS_SERVER: &str = "ANOMA_NETWORK_CONFIGS_SERVER"; +/// Github URL prefix of released Namada network configs +pub const ENV_VAR_NETWORK_CONFIGS_SERVER: &str = + "NAMADA_NETWORK_CONFIGS_SERVER"; const DEFAULT_NETWORK_CONFIGS_SERVER: &str = "https://github.com/heliaxdev/anoma-network-config/releases/download"; /// We do pre-genesis validator set up in this directory pub const PRE_GENESIS_DIR: &str = "pre-genesis"; -/// Configure Anoma to join an existing network. The chain must be released in +/// Configure Namada to join an existing network. The chain must be released in /// the repository. 
pub async fn join_network( global_args: args::Global, @@ -158,7 +160,7 @@ pub async fn join_network( // Rename the base-dir from the default and rename wasm-dir, if non-default. if non_default_dir { - // For compatibility for networks released with Anoma <= v0.4: + // For compatibility for networks released with Namada <= v0.4: // The old releases include the WASM directory at root path of the // archive. This has been moved into the chain directory, so if the // WASM dir is found at the old path, we move it to the new path. @@ -400,8 +402,7 @@ pub fn init_network( archive_dir, }: args::InitNetwork, ) { - let mut config = - genesis_config::open_genesis_config(&genesis_path).unwrap(); + let mut config = genesis_config::open_genesis_config(genesis_path).unwrap(); // Update the WASM checksums let checksums = @@ -475,10 +476,7 @@ pub fn init_network( // Generate account and reward addresses let address = address::gen_established_address("validator account"); - let reward_address = - address::gen_established_address("validator reward account"); config.address = Some(address.to_string()); - config.staking_reward_address = Some(reward_address.to_string()); // Generate the consensus, account and reward keys, unless they're // pre-defined. 
@@ -518,24 +516,6 @@ pub fn init_network( keypair.ref_to() }); - let staking_reward_pk = try_parse_public_key( - format!("validator {name} staking reward key"), - &config.staking_reward_public_key, - ) - .unwrap_or_else(|| { - let alias = format!("{}-reward-key", name); - println!( - "Generating validator {} staking reward account key...", - name - ); - let (_alias, keypair) = wallet.gen_key( - SchemeType::Ed25519, - Some(alias), - unsafe_dont_encrypt, - ); - keypair.ref_to() - }); - let protocol_pk = try_parse_public_key( format!("validator {name} protocol key"), &config.protocol_public_key, @@ -614,8 +594,6 @@ pub fn init_network( Some(genesis_config::HexString(consensus_pk.to_string())); config.account_public_key = Some(genesis_config::HexString(account_pk.to_string())); - config.staking_reward_public_key = - Some(genesis_config::HexString(staking_reward_pk.to_string())); config.eth_cold_key = Some(genesis_config::HexString(eth_cold_pk.to_string())); config.eth_hot_key = @@ -628,7 +606,6 @@ pub fn init_network( // Write keypairs to wallet wallet.add_address(name.clone(), address); - wallet.add_address(format!("{}-reward", &name), reward_address); wallet.save().unwrap(); }); @@ -647,18 +624,16 @@ pub fn init_network( }) } - if let Some(token) = &mut config.token { - token.iter_mut().for_each(|(name, config)| { - if config.address.is_none() { - let address = address::gen_established_address("token"); - config.address = Some(address.to_string()); - wallet.add_address(name.clone(), address); - } - if config.vp.is_none() { - config.vp = Some("vp_token".to_string()); - } - }) - } + config.token.iter_mut().for_each(|(name, config)| { + if config.address.is_none() { + let address = address::gen_established_address("token"); + config.address = Some(address.to_string()); + wallet.add_address(name.clone(), address); + } + if config.vp.is_none() { + config.vp = Some("vp_token".to_string()); + } + }); if let Some(implicit) = &mut config.implicit { 
implicit.iter_mut().for_each(|(name, config)| { @@ -714,7 +689,7 @@ pub fn init_network( fs::rename(&temp_dir, &chain_dir).unwrap(); // Copy the WASM checksums - let wasm_dir_full = chain_dir.join(&config::DEFAULT_WASM_DIR); + let wasm_dir_full = chain_dir.join(config::DEFAULT_WASM_DIR); fs::create_dir_all(&wasm_dir_full).unwrap(); fs::copy( &wasm_checksums_path, @@ -731,14 +706,14 @@ pub fn init_network( .join(config::DEFAULT_BASE_DIR); let temp_validator_chain_dir = validator_dir.join(temp_chain_id.as_str()); - let validator_chain_dir = validator_dir.join(&chain_id.as_str()); + let validator_chain_dir = validator_dir.join(chain_id.as_str()); // Rename the generated directories for validators from `temp_chain_id` // to `chain_id` - std::fs::rename(&temp_validator_chain_dir, &validator_chain_dir) + std::fs::rename(temp_validator_chain_dir, &validator_chain_dir) .unwrap(); // Copy the WASM checksums - let wasm_dir_full = validator_chain_dir.join(&config::DEFAULT_WASM_DIR); + let wasm_dir_full = validator_chain_dir.join(config::DEFAULT_WASM_DIR); fs::create_dir_all(&wasm_dir_full).unwrap(); fs::copy( &wasm_checksums_path, @@ -931,18 +906,36 @@ fn init_established_account( } } -/// Initialize genesis validator's address, staking reward address, -/// consensus key, validator account key and staking rewards key and use -/// it in the ledger's node. +/// Initialize genesis validator's address, consensus key and validator account +/// key and use it in the ledger's node. 
pub fn init_genesis_validator( global_args: args::Global, args::InitGenesisValidator { alias, + commission_rate, + max_commission_rate_change, net_address, unsafe_dont_encrypt, key_scheme, }: args::InitGenesisValidator, ) { + // Validate the commission rate data + if commission_rate > Decimal::ONE || commission_rate < Decimal::ZERO { + eprintln!( + "The validator commission rate must not exceed 1.0 or 100%, and \ + it must be 0 or positive" + ); + cli::safe_exit(1) + } + if max_commission_rate_change > Decimal::ONE + || max_commission_rate_change < Decimal::ZERO + { + eprintln!( + "The validator maximum change in commission rate per epoch must \ + not exceed 1.0 or 100%" + ); + cli::safe_exit(1) + } let pre_genesis_dir = validator_pre_genesis_dir(&global_args.base_dir, &alias); println!("Generating validator keys..."); @@ -979,9 +972,6 @@ pub fn init_genesis_validator( account_public_key: Some(HexString( pre_genesis.account_key.ref_to().to_string(), )), - staking_reward_public_key: Some(HexString( - pre_genesis.rewards_key.ref_to().to_string(), - )), protocol_public_key: Some(HexString( pre_genesis .store @@ -1000,6 +990,8 @@ pub fn init_genesis_validator( .public() .to_string(), )), + commission_rate: Some(commission_rate), + max_commission_rate_change: Some(max_commission_rate_change), tendermint_node_key: Some(HexString( pre_genesis.tendermint_node_key.ref_to().to_string(), )), @@ -1091,7 +1083,7 @@ pub fn write_tendermint_node_key( .create(true) .write(true) .truncate(true) - .open(&node_key_path) + .open(node_key_path) .expect("Couldn't create validator node key file"); serde_json::to_writer_pretty(file, &tm_node_keypair_json) .expect("Couldn't write validator node key file"); diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index d27827a1d9c..01814335b2a 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -8,15 +8,16 @@ use borsh::{BorshDeserialize, BorshSerialize}; use derivative::Derivative; use 
namada::ledger::eth_bridge::parameters::EthereumBridgeConfig; use namada::ledger::governance::parameters::GovParams; -use namada::ledger::parameters::Parameters; +use namada::ledger::parameters::EpochDuration; use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::types::address::Address; #[cfg(not(feature = "dev"))] use namada::types::chain::ChainId; use namada::types::key::dkg_session_keys::DkgPublicKey; use namada::types::key::*; -use namada::types::time::DateTimeUtc; +use namada::types::time::{DateTimeUtc, DurationSecs}; use namada::types::{storage, token}; +use rust_decimal::Decimal; /// Genesis configuration file format pub mod genesis_config { @@ -29,20 +30,20 @@ pub mod genesis_config { use data_encoding::HEXLOWER; use eyre::Context; use namada::ledger::governance::parameters::GovParams; - use namada::ledger::parameters::{EpochDuration, Parameters}; - use namada::ledger::pos::types::BasisPoints; + use namada::ledger::parameters::EpochDuration; use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::types::address::Address; use namada::types::key::dkg_session_keys::DkgPublicKey; use namada::types::key::*; use namada::types::time::Rfc3339String; use namada::types::{storage, token}; + use rust_decimal::Decimal; use serde::{Deserialize, Serialize}; use thiserror::Error; use super::{ EstablishedAccount, EthereumBridgeConfig, Genesis, ImplicitAccount, - TokenAccount, Validator, + Parameters, TokenAccount, Validator, }; use crate::cli; @@ -106,10 +107,13 @@ pub mod genesis_config { pub struct GenesisConfig { // Genesis timestamp pub genesis_time: Rfc3339String, + // Name of the native token - this must one of the tokens included in + // the `token` field + pub native_token: String, // Initial validator set pub validator: HashMap, // Token accounts present at genesis - pub token: Option>, + pub token: HashMap, // Established accounts present at genesis pub established: Option>, // Implicit accounts present at genesis @@ -167,8 +171,6 @@ pub 
mod genesis_config { pub eth_hot_key: Option, // Public key for validator account. (default: generate) pub account_public_key: Option, - // Public key for staking reward account. (default: generate) - pub staking_reward_public_key: Option, // Public protocol signing key for validator account. (default: // generate) pub protocol_public_key: Option, @@ -176,18 +178,19 @@ pub mod genesis_config { pub dkg_public_key: Option, // Validator address (default: generate). pub address: Option, - // Staking reward account address (default: generate). - pub staking_reward_address: Option, // Total number of tokens held at genesis. // XXX: u64 doesn't work with toml-rs! pub tokens: Option, // Unstaked balance at genesis. // XXX: u64 doesn't work with toml-rs! pub non_staked_balance: Option, + /// Commission rate charged on rewards for delegators (bounded inside + /// 0-1) + pub commission_rate: Option, + /// Maximum change in commission rate permitted per epoch + pub max_commission_rate_change: Option, // Filename of validator VP. (default: default validator VP) pub validator_vp: Option, - // Filename of staking reward account VP. (default: user VP) - pub staking_reward_vp: Option, // IP:port of the validator. (used in generation only) pub net_address: Option, /// Tendermint node key is used to derive Tendermint node ID for node @@ -229,9 +232,6 @@ pub mod genesis_config { // Minimum number of blocks per epoch. // XXX: u64 doesn't work with toml-rs! pub min_num_of_blocks: u64, - // Minimum duration of an epoch (in seconds). - // TODO: this is i64 because datetime wants it - pub min_duration: i64, // Maximum duration per block (in seconds). // TODO: this is i64 because datetime wants it pub max_expected_time_per_block: i64, @@ -241,6 +241,14 @@ pub mod genesis_config { // Hashes of whitelisted txs array. `None` value or an empty array // disables whitelisting. 
pub tx_whitelist: Option>, + /// Filename of implicit accounts validity predicate WASM code + pub implicit_vp: String, + /// Expected number of epochs per year + pub epochs_per_year: u64, + /// PoS gain p + pub pos_gain_p: Decimal, + /// PoS gain d + pub pos_gain_d: Decimal, } #[derive(Clone, Debug, Deserialize, Serialize)] @@ -254,23 +262,28 @@ pub mod genesis_config { // Unbonding length (in epochs). // XXX: u64 doesn't work with toml-rs! pub unbonding_len: u64, - // Votes per token (in basis points). + // Votes per token. // XXX: u64 doesn't work with toml-rs! - pub votes_per_token: u64, + pub tm_votes_per_token: Decimal, // Reward for proposing a block. // XXX: u64 doesn't work with toml-rs! - pub block_proposer_reward: u64, + pub block_proposer_reward: Decimal, // Reward for voting on a block. // XXX: u64 doesn't work with toml-rs! - pub block_vote_reward: u64, + pub block_vote_reward: Decimal, + // Maximum staking APY + // XXX: u64 doesn't work with toml-rs! + pub max_inflation_rate: Decimal, + // Target ratio of staked NAM tokens to total NAM tokens + pub target_staked_ratio: Decimal, // Portion of a validator's stake that should be slashed on a - // duplicate vote (in basis points). + // duplicate vote. // XXX: u64 doesn't work with toml-rs! - pub duplicate_vote_slash_rate: u64, + pub duplicate_vote_min_slash_rate: Decimal, // Portion of a validator's stake that should be slashed on a - // light client attack (in basis points). + // light client attack. // XXX: u64 doesn't work with toml-rs! 
- pub light_client_attack_slash_rate: u64, + pub light_client_attack_min_slash_rate: Decimal, } #[derive(Clone, Debug, Deserialize, Serialize)] @@ -285,17 +298,11 @@ pub mod genesis_config { ) -> Validator { let validator_vp_name = config.validator_vp.as_ref().unwrap(); let validator_vp_config = wasm.get(validator_vp_name).unwrap(); - let reward_vp_name = config.staking_reward_vp.as_ref().unwrap(); - let reward_vp_config = wasm.get(reward_vp_name).unwrap(); Validator { pos_data: GenesisValidator { - address: Address::decode(&config.address.as_ref().unwrap()) + address: Address::decode(config.address.as_ref().unwrap()) .unwrap(), - staking_reward_address: Address::decode( - &config.staking_reward_address.as_ref().unwrap(), - ) - .unwrap(), tokens: token::Amount::whole(config.tokens.unwrap_or_default()), consensus_key: config .consensus_public_key @@ -303,12 +310,6 @@ pub mod genesis_config { .unwrap() .to_public_key() .unwrap(), - staking_reward_key: config - .staking_reward_public_key - .as_ref() - .unwrap() - .to_public_key() - .unwrap(), eth_cold_key: config .eth_cold_key .as_ref() @@ -321,6 +322,29 @@ pub mod genesis_config { .unwrap() .to_public_key() .unwrap(), + commission_rate: config + .commission_rate + .and_then(|rate| { + if rate >= Decimal::ZERO && rate <= Decimal::ONE { + Some(rate) + } else { + None + } + }) + .expect("Commission rate must be between 0.0 and 1.0"), + max_commission_rate_change: config + .max_commission_rate_change + .and_then(|rate| { + if rate >= Decimal::ZERO && rate <= Decimal::ONE { + Some(rate) + } else { + None + } + }) + .expect( + "Max commission rate change must be between 0.0 and \ + 1.0", + ), }, account_key: config .account_public_key @@ -350,16 +374,6 @@ pub mod genesis_config { .unwrap() .to_sha256_bytes() .unwrap(), - reward_vp_code_path: reward_vp_config.filename.to_owned(), - reward_vp_sha256: reward_vp_config - .sha256 - .clone() - .unwrap_or_else(|| { - eprintln!("Unknown validator VP WASM sha256"); - 
cli::safe_exit(1); - }) - .to_sha256_bytes() - .unwrap(), } } @@ -374,8 +388,7 @@ pub mod genesis_config { let token_vp_config = wasm.get(token_vp_name).unwrap(); TokenAccount { - address: Address::decode(&config.address.as_ref().unwrap()) - .unwrap(), + address: Address::decode(config.address.as_ref().unwrap()).unwrap(), vp_code_path: token_vp_config.filename.to_owned(), vp_sha256: token_vp_config .sha256 @@ -393,7 +406,7 @@ pub mod genesis_config { .iter() .map(|(alias_or_address, amount)| { ( - match Address::decode(&alias_or_address) { + match Address::decode(alias_or_address) { Ok(address) => address, Err(decode_err) => { if let Some(alias) = @@ -456,8 +469,7 @@ pub mod genesis_config { let account_vp_config = wasm.get(account_vp_name).unwrap(); EstablishedAccount { - address: Address::decode(&config.address.as_ref().unwrap()) - .unwrap(), + address: Address::decode(config.address.as_ref().unwrap()).unwrap(), vp_code_path: account_vp_config.filename.to_owned(), vp_sha256: account_vp_config .sha256 @@ -479,7 +491,7 @@ pub mod genesis_config { .iter() .map(|(address, hex)| { ( - storage::Key::parse(&address).unwrap(), + storage::Key::parse(address).unwrap(), hex.to_bytes().unwrap(), ) }) @@ -499,32 +511,55 @@ pub mod genesis_config { } pub fn load_genesis_config(config: GenesisConfig) -> Genesis { - let wasms = config.wasm; - let validators: HashMap = config - .validator - .iter() - .map(|(name, cfg)| (name.clone(), load_validator(cfg, &wasms))) - .collect(); - let established_accounts: HashMap = config - .established - .unwrap_or_default() + let GenesisConfig { + genesis_time, + native_token, + validator, + token, + established, + implicit, + parameters, + pos_params, + gov_params, + wasm, + ethereum_bridge_params, + } = config; + + let native_token = Address::decode( + token + .get(&native_token) + .expect( + "Native token's alias must be present in the declared \ + tokens", + ) + .address + .as_ref() + .expect("Missing native token address"), + ) + 
.expect("Invalid address"); + + let validators: HashMap = validator .iter() - .map(|(name, cfg)| (name.clone(), load_established(cfg, &wasms))) + .map(|(name, cfg)| (name.clone(), load_validator(cfg, &wasm))) .collect(); - let implicit_accounts: HashMap = config - .implicit + let established_accounts: HashMap = + established + .unwrap_or_default() + .iter() + .map(|(name, cfg)| (name.clone(), load_established(cfg, &wasm))) + .collect(); + let implicit_accounts: HashMap = implicit .unwrap_or_default() .iter() .map(|(name, cfg)| (name.clone(), load_implicit(cfg))) .collect(); - let token_accounts = config - .token - .unwrap_or_default() + #[allow(clippy::iter_kv_map)] + let token_accounts = token .iter() .map(|(_name, cfg)| { load_token( cfg, - &wasms, + &wasm, &validators, &established_accounts, &implicit_accounts, @@ -532,55 +567,89 @@ pub mod genesis_config { }) .collect(); + let implicit_vp_config = wasm.get(¶meters.implicit_vp).unwrap(); + let implicit_vp_code_path = implicit_vp_config.filename.to_owned(); + let implicit_vp_sha256 = implicit_vp_config + .sha256 + .clone() + .unwrap_or_else(|| { + eprintln!("Unknown implicit VP WASM sha256"); + cli::safe_exit(1); + }) + .to_sha256_bytes() + .unwrap(); + + let min_duration: i64 = + 60 * 60 * 24 * 365 / (parameters.epochs_per_year as i64); let parameters = Parameters { epoch_duration: EpochDuration { - min_num_of_blocks: config.parameters.min_num_of_blocks, + min_num_of_blocks: parameters.min_num_of_blocks, min_duration: namada::types::time::Duration::seconds( - config.parameters.min_duration, + min_duration, ) .into(), }, max_expected_time_per_block: namada::types::time::Duration::seconds( - config.parameters.max_expected_time_per_block, + parameters.max_expected_time_per_block, ) .into(), - vp_whitelist: config.parameters.vp_whitelist.unwrap_or_default(), - tx_whitelist: config.parameters.tx_whitelist.unwrap_or_default(), + vp_whitelist: parameters.vp_whitelist.unwrap_or_default(), + tx_whitelist: 
parameters.tx_whitelist.unwrap_or_default(), + implicit_vp_code_path, + implicit_vp_sha256, + epochs_per_year: parameters.epochs_per_year, + pos_gain_p: parameters.pos_gain_p, + pos_gain_d: parameters.pos_gain_d, + staked_ratio: Decimal::ZERO, + pos_inflation_amount: 0, }; + let GovernanceParamsConfig { + min_proposal_fund, + max_proposal_code_size, + min_proposal_period, + max_proposal_content_size, + min_proposal_grace_epochs, + max_proposal_period, + } = gov_params; let gov_params = GovParams { - min_proposal_fund: config.gov_params.min_proposal_fund, - max_proposal_code_size: config.gov_params.max_proposal_code_size, - min_proposal_period: config.gov_params.min_proposal_period, - max_proposal_period: config.gov_params.max_proposal_period, - max_proposal_content_size: config - .gov_params - .max_proposal_content_size, - min_proposal_grace_epochs: config - .gov_params - .min_proposal_grace_epochs, + min_proposal_fund, + max_proposal_code_size, + min_proposal_period, + max_proposal_content_size, + min_proposal_grace_epochs, + max_proposal_period, }; + let PosParamsConfig { + max_validator_slots, + pipeline_len, + unbonding_len, + tm_votes_per_token, + block_proposer_reward, + block_vote_reward, + max_inflation_rate, + target_staked_ratio, + duplicate_vote_min_slash_rate, + light_client_attack_min_slash_rate, + } = pos_params; let pos_params = PosParams { - max_validator_slots: config.pos_params.max_validator_slots, - pipeline_len: config.pos_params.pipeline_len, - unbonding_len: config.pos_params.unbonding_len, - votes_per_token: BasisPoints::new( - config.pos_params.votes_per_token, - ), - block_proposer_reward: config.pos_params.block_proposer_reward, - block_vote_reward: config.pos_params.block_vote_reward, - duplicate_vote_slash_rate: BasisPoints::new( - config.pos_params.duplicate_vote_slash_rate, - ), - light_client_attack_slash_rate: BasisPoints::new( - config.pos_params.light_client_attack_slash_rate, - ), + max_validator_slots, + pipeline_len, + 
unbonding_len, + tm_votes_per_token, + block_proposer_reward, + block_vote_reward, + max_inflation_rate, + target_staked_ratio, + duplicate_vote_min_slash_rate, + light_client_attack_min_slash_rate, }; let mut genesis = Genesis { - genesis_time: config.genesis_time.try_into().unwrap(), + genesis_time: genesis_time.try_into().unwrap(), + native_token, validators: validators.into_values().collect(), token_accounts, established_accounts: established_accounts.into_values().collect(), @@ -588,7 +657,7 @@ pub mod genesis_config { parameters, pos_params, gov_params, - ethereum_bridge_params: config.ethereum_bridge_params, + ethereum_bridge_params, }; genesis.init(); genesis @@ -629,6 +698,7 @@ pub mod genesis_config { #[borsh_init(init)] pub struct Genesis { pub genesis_time: DateTimeUtc, + pub native_token: Address, pub validators: Vec, pub token_accounts: Vec, pub established_accounts: Vec, @@ -674,17 +744,13 @@ pub struct Validator { pub protocol_key: common::PublicKey, /// The public DKG session key used during the DKG protocol pub dkg_public_key: DkgPublicKey, - /// These tokens are no staked and hence do not contribute to the + /// These tokens are not staked and hence do not contribute to the /// validator's voting power pub non_staked_balance: token::Amount, /// Validity predicate code WASM pub validator_vp_code_path: String, /// Expected SHA-256 hash of the validator VP pub validator_vp_sha256: [u8; 32], - /// Staking reward account code WASM - pub reward_vp_code_path: String, - /// Expected SHA-256 hash of the staking reward VP - pub reward_vp_sha256: [u8; 32], } #[derive( @@ -737,6 +803,46 @@ pub struct ImplicitAccount { pub public_key: common::PublicKey, } +/// Protocol parameters. 
This is almost the same as +/// `ledger::parameters::Parameters`, but instead of having the `implicit_vp` +/// WASM code bytes, it only has the name and sha as the actual code is loaded +/// on `init_chain` +#[derive( + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + BorshSerialize, + BorshDeserialize, +)] +pub struct Parameters { + /// Epoch duration + pub epoch_duration: EpochDuration, + /// Maximum expected time per block + pub max_expected_time_per_block: DurationSecs, + /// Whitelisted validity predicate hashes + pub vp_whitelist: Vec, + /// Whitelisted tx hashes + pub tx_whitelist: Vec, + /// Implicit accounts validity predicate code WASM + pub implicit_vp_code_path: String, + /// Expected SHA-256 hash of the implicit VP + pub implicit_vp_sha256: [u8; 32], + /// Expected number of epochs per year (read only) + pub epochs_per_year: u64, + /// PoS gain p (read only) + pub pos_gain_p: Decimal, + /// PoS gain d (read only) + pub pos_gain_d: Decimal, + /// PoS staked ratio (read + write for every epoch) + pub staked_ratio: Decimal, + /// PoS inflation amount from the last epoch (read + write for every epoch) + pub pos_inflation_amount: u64, +} + #[cfg(not(feature = "dev"))] pub fn genesis(base_dir: impl AsRef, chain_id: &ChainId) -> Genesis { let path = base_dir @@ -746,58 +852,48 @@ pub fn genesis(base_dir: impl AsRef, chain_id: &ChainId) -> Genesis { } #[cfg(feature = "dev")] pub fn genesis() -> Genesis { - use namada::ledger::parameters::EpochDuration; use namada::types::address; + use rust_decimal_macros::dec; use crate::wallet; + let vp_implicit_path = "vp_implicit.wasm"; let vp_token_path = "vp_token.wasm"; let vp_user_path = "vp_user.wasm"; // NOTE When the validator's key changes, tendermint must be reset with - // `anoma reset` command. To generate a new validator, use the + // `namada reset` command. To generate a new validator, use the // `tests::gen_genesis_validator` below. 
let consensus_keypair = wallet::defaults::validator_keypair(); let account_keypair = wallet::defaults::validator_keypair(); - let ed_staking_reward_keypair = ed25519::SecretKey::try_from_slice(&[ - 61, 198, 87, 204, 44, 94, 234, 228, 217, 72, 245, 27, 40, 2, 151, 174, - 24, 247, 69, 6, 9, 30, 44, 16, 88, 238, 77, 162, 243, 125, 240, 206, - ]) - .unwrap(); - let secp_eth_cold_keypair = secp256k1::SecretKey::try_from_slice(&[ 90, 83, 107, 155, 193, 251, 120, 27, 76, 1, 188, 8, 116, 121, 90, 99, 65, 17, 187, 6, 238, 141, 63, 188, 76, 38, 102, 7, 47, 185, 28, 52, ]) .unwrap(); - let staking_reward_keypair = - common::SecretKey::try_from_sk(&ed_staking_reward_keypair).unwrap(); let eth_cold_keypair = common::SecretKey::try_from_sk(&secp_eth_cold_keypair).unwrap(); let address = wallet::defaults::validator_address(); - let staking_reward_address = Address::decode("atest1v4ehgw36xcersvee8qerxd35x9prsw2xg5erxv6pxfpygd2x89z5xsf5xvmnysejgv6rwd2rnj2avt").unwrap(); let (protocol_keypair, eth_bridge_keypair, dkg_keypair) = wallet::defaults::validator_keys(); let validator = Validator { pos_data: GenesisValidator { address, - staking_reward_address, tokens: token::Amount::whole(200_000), consensus_key: consensus_keypair.ref_to(), - staking_reward_key: staking_reward_keypair.ref_to(), eth_cold_key: eth_cold_keypair.ref_to(), eth_hot_key: eth_bridge_keypair.ref_to(), + commission_rate: dec!(0.05), + max_commission_rate_change: dec!(0.01), }, account_key: account_keypair.ref_to(), protocol_key: protocol_keypair.ref_to(), dkg_public_key: dkg_keypair.public(), non_staked_balance: token::Amount::whole(100_000), - // TODO replace with https://github.com/anoma/anoma/issues/25) + // TODO replace with https://github.com/anoma/namada/issues/25) validator_vp_code_path: vp_user_path.into(), validator_vp_sha256: Default::default(), - reward_vp_code_path: vp_user_path.into(), - reward_vp_sha256: Default::default(), }; let parameters = Parameters { 
epoch_duration: EpochDuration { @@ -807,6 +903,14 @@ pub fn genesis() -> Genesis { max_expected_time_per_block: namada::types::time::DurationSecs(30), vp_whitelist: vec![], tx_whitelist: vec![], + implicit_vp_code_path: vp_implicit_path.into(), + implicit_vp_sha256: Default::default(), + epochs_per_year: 525_600, /* seconds in yr (60*60*24*365) div seconds + * per epoch (60 = min_duration) */ + pos_gain_p: dec!(0.1), + pos_gain_d: dec!(0.1), + staked_ratio: dec!(0.0), + pos_inflation_amount: 0, }; let albert = EstablishedAccount { address: wallet::defaults::albert_address(), @@ -864,8 +968,8 @@ pub fn genesis() -> Genesis { ((&validator.account_key).into(), default_key_tokens), ]); let token_accounts = address::tokens() - .into_iter() - .map(|(address, _)| TokenAccount { + .into_keys() + .map(|address| TokenAccount { address, vp_code_path: vp_token_path.into(), vp_sha256: Default::default(), @@ -882,6 +986,7 @@ pub fn genesis() -> Genesis { pos_params: PosParams::default(), gov_params: GovParams::default(), ethereum_bridge_params: None, + native_token: address::nam(), } } @@ -896,18 +1001,14 @@ pub mod tests { use crate::wallet; /// Run `cargo test gen_genesis_validator -- --nocapture` to generate a - /// new genesis validator address, staking reward address and keypair. + /// new genesis validator address and keypair. 
#[test] fn gen_genesis_validator() { let address = gen_established_address(); - let staking_reward_address = gen_established_address(); let mut rng: ThreadRng = thread_rng(); let keypair: common::SecretKey = ed25519::SigScheme::generate(&mut rng).try_to_sk().unwrap(); let kp_arr = keypair.try_to_vec().unwrap(); - let staking_reward_keypair: common::SecretKey = - ed25519::SigScheme::generate(&mut rng).try_to_sk().unwrap(); - let srkp_arr = staking_reward_keypair.try_to_vec().unwrap(); let (protocol_keypair, _eth_hot_bridge_keypair, dkg_keypair) = wallet::defaults::validator_keys(); @@ -922,9 +1023,7 @@ pub mod tests { .unwrap(); println!("address: {}", address); - println!("staking_reward_address: {}", staking_reward_address); println!("keypair: {:?}", kp_arr); - println!("staking_reward_keypair: {:?}", srkp_arr); println!("protocol_keypair: {:?}", protocol_keypair); println!("dkg_keypair: {:?}", dkg_keypair.try_to_vec().unwrap()); println!( diff --git a/apps/src/lib/config/mod.rs b/apps/src/lib/config/mod.rs index b435433c76a..e89aced2510 100644 --- a/apps/src/lib/config/mod.rs +++ b/apps/src/lib/config/mod.rs @@ -21,17 +21,17 @@ use crate::facade::tendermint::Timeout; use crate::facade::tendermint_config::net::Address as TendermintAddress; /// Base directory contains global config and chain directories. -pub const DEFAULT_BASE_DIR: &str = ".anoma"; +pub const DEFAULT_BASE_DIR: &str = ".namada"; /// Default WASM dir. pub const DEFAULT_WASM_DIR: &str = "wasm"; /// The WASM checksums file contains the hashes of built WASMs. It is inside the /// WASM dir. pub const DEFAULT_WASM_CHECKSUMS_FILE: &str = "checksums.json"; -/// Chain-specific Anoma configuration. Nested in chain dirs. +/// Chain-specific Namada configuration. Nested in chain dirs. pub const FILENAME: &str = "config.toml"; /// Chain-specific Tendermint configuration. Nested in chain dirs. pub const TENDERMINT_DIR: &str = "tendermint"; -/// Chain-specific Anoma DB. Nested in chain dirs. 
+/// Chain-specific Namada DB. Nested in chain dirs. pub const DB_DIR: &str = "db"; #[derive(Clone, Debug, Serialize, Deserialize)] @@ -165,7 +165,7 @@ impl Ledger { IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 26661, ), - instrumentation_namespace: "anoman_tm".to_string(), + instrumentation_namespace: "namadan_tm".to_string(), }, ethereum_bridge: ethereum_bridge::ledger::Config::default(), } @@ -297,7 +297,7 @@ impl Config { .and_then(|c| c.merge(config::File::with_name(file_name))) .and_then(|c| { c.merge( - config::Environment::with_prefix("anoma").separator("__"), + config::Environment::with_prefix("namada").separator("__"), ) }) .map_err(Error::ReadError)?; diff --git a/apps/src/lib/logging.rs b/apps/src/lib/logging.rs index a71559a7f94..b60bab1f695 100644 --- a/apps/src/lib/logging.rs +++ b/apps/src/lib/logging.rs @@ -7,10 +7,10 @@ use tracing_log::LogTracer; use tracing_subscriber::filter::{Directive, EnvFilter}; use tracing_subscriber::fmt::Subscriber; -pub const ENV_KEY: &str = "ANOMA_LOG"; +pub const ENV_KEY: &str = "NAMADA_LOG"; // Env var to enable/disable color log -const COLOR_ENV_KEY: &str = "ANOMA_LOG_COLOR"; +const COLOR_ENV_KEY: &str = "NAMADA_LOG_COLOR"; pub fn init_from_env_or(default: impl Into) -> Result<()> { let filter = filter_from_env_or(default); diff --git a/apps/src/lib/mod.rs b/apps/src/lib/mod.rs index d8ab71236c9..65d0472e9eb 100644 --- a/apps/src/lib/mod.rs +++ b/apps/src/lib/mod.rs @@ -1,7 +1,7 @@ //! Shared code for the node, client etc. 
-#![doc(html_favicon_url = "https://dev.anoma.net/master/favicon.png")] -#![doc(html_logo_url = "https://dev.anoma.net/master/rustdoc-logo.png")] +#![doc(html_favicon_url = "https://dev.namada.net/master/favicon.png")] +#![doc(html_logo_url = "https://dev.namada.net/master/rustdoc-logo.png")] #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] diff --git a/apps/src/lib/node/ledger/ethereum_node/oracle/mod.rs b/apps/src/lib/node/ledger/ethereum_node/oracle/mod.rs index eb32e2b1c2f..b23747be0c8 100644 --- a/apps/src/lib/node/ledger/ethereum_node/oracle/mod.rs +++ b/apps/src/lib/node/ledger/ethereum_node/oracle/mod.rs @@ -22,7 +22,7 @@ use super::test_tools::mock_web3_client::Web3; const DEFAULT_BACKOFF: Duration = std::time::Duration::from_secs(1); /// A client that can talk to geth and parse -/// and relay events relevant to Anoma to the +/// and relay events relevant to Namada to the /// ledger process pub struct Oracle { /// The client that talks to the Ethereum fullnode @@ -61,7 +61,7 @@ impl Oracle { } } - /// Send a series of [`EthereumEvent`]s to the Anoma + /// Send a series of [`EthereumEvent`]s to the Namada /// ledger. Returns a boolean indicating that all sent /// successfully. If false is returned, the receiver /// has hung up. @@ -131,7 +131,7 @@ pub fn run_oracle( } /// Given an oracle, watch for new Ethereum events, processing -/// them into Anoma native types. +/// them into Namada native types. 
/// /// It also checks that once the specified number of confirmations /// is reached, an event is forwarded to the ledger process diff --git a/apps/src/lib/node/ledger/mod.rs b/apps/src/lib/node/ledger/mod.rs index 99769d9254a..f6eb4c5f5bb 100644 --- a/apps/src/lib/node/ledger/mod.rs +++ b/apps/src/lib/node/ledger/mod.rs @@ -1,7 +1,6 @@ mod abortable; mod broadcaster; mod ethereum_node; -pub mod rpc; mod shell; mod shims; pub mod storage; @@ -32,6 +31,7 @@ use crate::config::{ethereum_bridge, TendermintMode}; use crate::facade::tendermint_proto::abci::CheckTxType; use crate::facade::tower_abci::{response, split, Server}; use crate::node::ledger::broadcaster::Broadcaster; +use crate::node::ledger::config::genesis; use crate::node::ledger::ethereum_node::oracle; use crate::node::ledger::shell::{Error, MempoolTxType, Shell}; use crate::node::ledger::shims::abcipp_shim::AbcippShim; @@ -39,10 +39,10 @@ use crate::node::ledger::shims::abcipp_shim_types::shim::{Request, Response}; use crate::{config, wasm_loader}; /// Env. var to set a number of Tokio RT worker threads -const ENV_VAR_TOKIO_THREADS: &str = "ANOMA_TOKIO_THREADS"; +const ENV_VAR_TOKIO_THREADS: &str = "NAMADA_TOKIO_THREADS"; /// Env. var to set a number of Rayon global worker threads -const ENV_VAR_RAYON_THREADS: &str = "ANOMA_RAYON_THREADS"; +const ENV_VAR_RAYON_THREADS: &str = "NAMADA_RAYON_THREADS"; /// The maximum number of Ethereum events the channel between /// the oracle and the shell can hold. 
@@ -146,7 +146,7 @@ impl Shell { CheckTxType::New => MempoolTxType::NewTransaction, CheckTxType::Recheck => MempoolTxType::RecheckTransaction, }; - Ok(Response::CheckTx(self.mempool_validate(&*tx.tx, r#type))) + Ok(Response::CheckTx(self.mempool_validate(&tx.tx, r#type))) } Request::ListSnapshots(_) => { Ok(Response::ListSnapshots(Default::default())) @@ -287,7 +287,7 @@ async fn run_aux(config: config::Ledger, wasm_dir: PathBuf) { } } - tracing::info!("Anoma ledger node has shut down."); + tracing::info!("Namada ledger node has shut down."); let res = task::block_in_place(move || shell_handler.join()); @@ -456,6 +456,10 @@ fn start_abci_broadcaster_shell( // Construct our ABCI application. let tendermint_mode = config.tendermint.tendermint_mode.clone(); let ledger_address = config.shell.ledger_address; + #[cfg(not(feature = "dev"))] + let genesis = genesis::genesis(&config.shell.base_dir, &config.chain_id); + #[cfg(feature = "dev")] + let genesis = genesis::genesis(); let (shell, abci_service) = AbcippShim::new( config, wasm_dir, @@ -464,6 +468,7 @@ fn start_abci_broadcaster_shell( &db_cache, vp_wasm_compilation_cache, tx_wasm_compilation_cache, + genesis.native_token, ); // Channel for signalling shut down to ABCI server @@ -486,7 +491,7 @@ fn start_abci_broadcaster_shell( let thread_builder = thread::Builder::new().name("ledger-shell".into()); let shell_handler = thread_builder .spawn(move || { - tracing::info!("Anoma ledger node started."); + tracing::info!("Namada ledger node started."); match tendermint_mode { TendermintMode::Validator => { tracing::info!("This node is a validator"); diff --git a/apps/src/lib/node/ledger/rpc.rs b/apps/src/lib/node/ledger/rpc.rs deleted file mode 100644 index b7a1ebcfad4..00000000000 --- a/apps/src/lib/node/ledger/rpc.rs +++ /dev/null @@ -1,123 +0,0 @@ -//! 
RPC endpoint is used for ledger state queries - -use std::fmt::Display; -use std::str::FromStr; - -use masp_primitives::asset_type::AssetType; -use namada::types::address::Address; -use namada::types::storage; -use namada::types::token::CONVERSION_KEY_PREFIX; -use thiserror::Error; - -use crate::facade::tendermint::abci::Path as AbciPath; - -/// RPC query path -#[derive(Debug, Clone)] -pub enum Path { - /// Dry run a transaction - DryRunTx, - /// Epoch of the last committed block - Epoch, - /// Results of all committed blocks - Results, - /// Read a storage value with exact storage key - Value(storage::Key), - /// Read a range of storage values with a matching key prefix - Prefix(storage::Key), - /// Check if the given storage key exists - HasKey(storage::Key), - /// Conversion associated with given asset type - Conversion(AssetType), -} - -#[derive(Debug, Clone)] -pub struct BalanceQuery { - #[allow(dead_code)] - owner: Option
, - #[allow(dead_code)] - token: Option
, -} - -const DRY_RUN_TX_PATH: &str = "dry_run_tx"; -const EPOCH_PATH: &str = "epoch"; -const RESULTS_PATH: &str = "results"; -const VALUE_PREFIX: &str = "value"; -const PREFIX_PREFIX: &str = "prefix"; -const HAS_KEY_PREFIX: &str = "has_key"; - -impl Display for Path { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Path::DryRunTx => write!(f, "{}", DRY_RUN_TX_PATH), - Path::Epoch => write!(f, "{}", EPOCH_PATH), - Path::Results => write!(f, "{}", RESULTS_PATH), - Path::Value(storage_key) => { - write!(f, "{}/{}", VALUE_PREFIX, storage_key) - } - Path::Prefix(storage_key) => { - write!(f, "{}/{}", PREFIX_PREFIX, storage_key) - } - Path::HasKey(storage_key) => { - write!(f, "{}/{}", HAS_KEY_PREFIX, storage_key) - } - Path::Conversion(asset_type) => { - write!(f, "{}/{}", CONVERSION_KEY_PREFIX, asset_type) - } - } - } -} - -impl FromStr for Path { - type Err = PathParseError; - - fn from_str(s: &str) -> Result { - match s { - DRY_RUN_TX_PATH => Ok(Self::DryRunTx), - EPOCH_PATH => Ok(Self::Epoch), - RESULTS_PATH => Ok(Self::Results), - _ => match s.split_once('/') { - Some((VALUE_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Value(key)) - } - Some((PREFIX_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Prefix(key)) - } - Some((HAS_KEY_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::HasKey(key)) - } - Some((CONVERSION_KEY_PREFIX, asset_type)) => { - let key = AssetType::from_str(asset_type) - .map_err(PathParseError::InvalidAssetType)?; - Ok(Self::Conversion(key)) - } - _ => Err(PathParseError::InvalidPath(s.to_string())), - }, - } - } -} - -impl From for AbciPath { - fn from(path: Path) -> Self { - let path = path.to_string(); - // TODO: update in tendermint-rs to allow to construct 
this from owned - // string. It's what `from_str` does anyway - AbciPath::from_str(&path).unwrap() - } -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum PathParseError { - #[error("Unrecognized query path: {0}")] - InvalidPath(String), - #[error("Invalid storage key: {0}")] - InvalidStorageKey(storage::Error), - #[error("Unrecognized asset type: {0}")] - InvalidAssetType(std::io::Error), -} diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index e26506566c3..0a194e01612 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -1,8 +1,10 @@ //! Implementation of the `FinalizeBlock` ABCI++ method for the Shell +use namada::ledger::pos::types::into_tm_voting_power; use namada::ledger::protocol; use namada::types::storage::{BlockHash, BlockResults, Header}; use namada::types::transaction::protocol::ProtocolTxType; +use namada::types::vote_extensions::ethereum_events::MultiSignedEthEvent; use super::governance::execute_governance_proposals; use super::*; @@ -151,7 +153,6 @@ where continue; } TxType::Protocol(protocol_tx) => match protocol_tx.tx { - #[cfg(not(feature = "abcipp"))] ProtocolTxType::EthEventsVext(ref ext) => { if self .mode @@ -167,29 +168,25 @@ where } Event::new_tx_event(&tx_type, height.0) } - #[cfg(not(feature = "abcipp"))] ProtocolTxType::ValSetUpdateVext(_) => { Event::new_tx_event(&tx_type, height.0) } - #[cfg(feature = "abcipp")] ProtocolTxType::EthereumEvents(ref digest) => { - if self - .mode - .get_validator_address() - .map(|validator| { - validator == &ext.data.validator_addr - }) - .unwrap_or(false) + if let Some(address) = + self.mode.get_validator_address().cloned() { - for event in - digest.events.iter().map(|signed| &signed.event) + let this_signer = + &(address, self.storage.last_height); + for MultiSignedEthEvent { event, signers } in + &digest.events { - self.mode.dequeue_eth_event(event); + if 
signers.contains(this_signer) { + self.mode.dequeue_eth_event(event); + } } } Event::new_tx_event(&tx_type, height.0) } - #[cfg(feature = "abcipp")] ProtocolTxType::ValidatorSetUpdate(_) => { Event::new_tx_event(&tx_type, height.0) } @@ -318,21 +315,16 @@ where .begin_block(hash, height) .expect("Beginning a block shouldn't fail"); + let header_time = header.time; self.storage .set_header(header) .expect("Setting a header shouldn't fail"); self.byzantine_validators = byzantine_validators; - let header = self - .storage - .header - .as_ref() - .expect("Header must have been set in prepare_proposal."); - let time = header.time; let new_epoch = self .storage - .update_epoch(height, time) + .update_epoch(height, header_time) .expect("Must be able to update epoch"); self.slash(); @@ -344,18 +336,19 @@ where fn update_epoch(&self, response: &mut shim::response::FinalizeBlock) { // Apply validator set update let (current_epoch, _gas) = self.storage.get_current_epoch(); + let pos_params = self.storage.read_pos_params(); // TODO ABCI validator updates on block H affects the validator set // on block H+2, do we need to update a block earlier? self.storage.validator_set_update(current_epoch, |update| { let (consensus_key, power) = match update { ValidatorSetUpdate::Active(ActiveValidator { consensus_key, - voting_power, + bonded_stake, }) => { - let power: u64 = voting_power.into(); - let power: i64 = power - .try_into() - .expect("unexpected validator's voting power"); + let power: i64 = into_tm_voting_power( + pos_params.tm_votes_per_token, + bonded_stake, + ); (consensus_key, power) } ValidatorSetUpdate::Deactivated(consensus_key) => { @@ -380,12 +373,10 @@ where /// are covered by the e2e tests. 
#[cfg(test)] mod test_finalize_block { - use namada::types::address::nam; use namada::types::ethereum_events::EthAddress; use namada::types::storage::Epoch; use namada::types::transaction::{EncryptionKey, Fee}; use namada::types::vote_extensions::ethereum_events; - #[cfg(feature = "abcipp")] use namada::types::vote_extensions::ethereum_events::MultiSignedEthEvent; use super::*; @@ -412,7 +403,7 @@ mod test_finalize_block { let wrapper = WrapperTx::new( Fee { amount: i.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -483,7 +474,7 @@ mod test_finalize_block { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -535,7 +526,7 @@ mod test_finalize_block { let wrapper = WrapperTx { fee: Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, pk: keypair.ref_to(), epoch: Epoch(0), @@ -601,7 +592,7 @@ mod test_finalize_block { let wrapper_tx = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -632,7 +623,7 @@ mod test_finalize_block { let wrapper_tx = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -707,7 +698,6 @@ mod test_finalize_block { let protocol_key = shell.mode.get_protocol_key().expect("Test failed").clone(); - #[cfg(feature = "abcipp")] let tx = ProtocolTxType::EthereumEvents(ethereum_events::VextDigest { signatures: Default::default(), events: vec![], @@ -715,21 +705,6 @@ mod test_finalize_block { .sign(&protocol_key) .to_bytes(); - #[cfg(not(feature = "abcipp"))] - let tx = ProtocolTxType::EthEventsVext( - ethereum_events::Vext::empty( - LAST_HEIGHT, - shell - .mode - .get_validator_address() - .expect("Test failed") - .clone(), - ) - .sign(&protocol_key), - ) - .sign(&protocol_key) - .to_bytes(); - let req = FinalizeBlock { txs: 
vec![ProcessedTx { tx, @@ -749,9 +724,10 @@ mod test_finalize_block { } /// Test that once a validator's vote for an Ethereum event lands - /// on-chain, it dequeues from the list of events to vote on. + /// on-chain from a vote extension digest, it dequeues from the + /// list of events to vote on. #[test] - fn test_eth_events_dequeued() { + fn test_eth_events_dequeued_digest() { let (mut shell, _, oracle) = setup(); let protocol_key = shell.mode.get_protocol_key().expect("Test failed").clone(); @@ -772,7 +748,6 @@ mod test_finalize_block { assert_eq!(queued_event, event); // ---- The protocol tx that includes this event on-chain - #[allow(clippy::redundant_clone)] let ext = ethereum_events::Vext { block_height: shell.storage.last_height, ethereum_events: vec![event.clone()], @@ -780,13 +755,9 @@ mod test_finalize_block { } .sign(&protocol_key); - #[cfg(feature = "abcipp")] let processed_tx = { let signed = MultiSignedEthEvent { event, - #[cfg(feature = "abcipp")] - signers: BTreeSet::from([address.clone()]), - #[cfg(not(feature = "abcipp"))] signers: BTreeSet::from([( address.clone(), shell.storage.last_height, @@ -794,9 +765,6 @@ mod test_finalize_block { }; let digest = ethereum_events::VextDigest { - #[cfg(feature = "abcipp")] - signatures: vec![(address, ext.sig)].into_iter().collect(), - #[cfg(not(feature = "abcipp"))] signatures: vec![( (address, shell.storage.last_height), ext.sig, @@ -816,7 +784,54 @@ mod test_finalize_block { } }; - #[cfg(not(feature = "abcipp"))] + // ---- This protocol tx is accepted + let [result]: [Event; 1] = shell + .finalize_block(FinalizeBlock { + txs: vec![processed_tx], + ..Default::default() + }) + .expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(result.event_type.to_string(), String::from("applied")); + let code = result.attributes.get("code").expect("Test failed").as_str(); + assert_eq!(code, String::from(ErrorCodes::Ok).as_str()); + + // --- The event is removed from the queue + 
assert!(shell.new_ethereum_events().is_empty()); + } + + /// Test that once a validator's vote for an Ethereum event lands + /// on-chain from a protocol tx, it dequeues from the + /// list of events to vote on. + #[test] + fn test_eth_events_dequeued_protocol_tx() { + let (mut shell, _, oracle) = setup(); + let protocol_key = + shell.mode.get_protocol_key().expect("Test failed").clone(); + let address = shell + .mode + .get_validator_address() + .expect("Test failed") + .clone(); + + // ---- the ledger receives a new Ethereum event + let event = EthereumEvent::NewContract { + name: "Test".to_string(), + address: EthAddress([0; 20]), + }; + tokio_test::block_on(oracle.send(event.clone())).expect("Test failed"); + let [queued_event]: [EthereumEvent; 1] = + shell.new_ethereum_events().try_into().expect("Test failed"); + assert_eq!(queued_event, event); + + // ---- The protocol tx that includes this event on-chain + let ext = ethereum_events::Vext { + block_height: shell.storage.last_height, + ethereum_events: vec![event], + validator_addr: address, + } + .sign(&protocol_key); let processed_tx = ProcessedTx { tx: ProtocolTxType::EthEventsVext(ext) .sign(&protocol_key) diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index f6a064c9c4e..f4771f40fe8 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -1,15 +1,15 @@ +use namada::core::ledger::slash_fund::ADDRESS as slash_fund_address; use namada::ledger::events::EventType; -use namada::ledger::governance::storage as gov_storage; -use namada::ledger::governance::utils::{ +use namada::ledger::governance::{ + storage as gov_storage, ADDRESS as gov_address, +}; +use namada::ledger::native_vp::governance::utils::{ compute_tally, get_proposal_votes, ProposalEvent, }; -use namada::ledger::governance::vp::ADDRESS as gov_address; use namada::ledger::protocol; -use namada::ledger::slash_fund::ADDRESS as 
slash_fund_address; -use namada::ledger::storage::traits::StorageHasher; use namada::ledger::storage::types::encode; -use namada::ledger::storage::{DBIter, DB}; -use namada::types::address::{nam, Address}; +use namada::ledger::storage::{DBIter, StorageHasher, DB}; +use namada::types::address::Address; use namada::types::governance::TallyResult; use namada::types::storage::Epoch; use namada::types::token; @@ -51,11 +51,12 @@ where })?; let votes = get_proposal_votes(&shell.storage, proposal_end_epoch, id); - let tally_result = - compute_tally(&shell.storage, proposal_end_epoch, votes); + let is_accepted = votes.and_then(|votes| { + compute_tally(&shell.storage, proposal_end_epoch, votes) + }); - let transfer_address = match tally_result { - TallyResult::Passed => { + let transfer_address = match is_accepted { + Ok(true) => { let proposal_author_key = gov_storage::get_author_key(id); let proposal_author = shell .read_storage_key::
(&proposal_author_key) @@ -163,7 +164,7 @@ where } } } - TallyResult::Rejected | TallyResult::Unknown => { + Ok(false) => { let proposal_event: Event = ProposalEvent::new( EventType::Proposal.to_string(), TallyResult::Rejected, @@ -175,14 +176,35 @@ where response.events.push(proposal_event); proposals_result.rejected.push(id); + slash_fund_address + } + Err(err) => { + tracing::error!( + "Unexpectedly failed to tally proposal ID {id} with error \ + {err}" + ); + let proposal_event: Event = ProposalEvent::new( + EventType::Proposal.to_string(), + TallyResult::Failed, + id, + false, + false, + ) + .into(); + response.events.push(proposal_event); + slash_fund_address } }; + let native_token = shell.storage.native_token.clone(); // transfer proposal locked funds - shell - .storage - .transfer(&nam(), funds, &gov_address, &transfer_address); + shell.storage.transfer( + &native_token, + funds, + &gov_address, + &transfer_address, + ); } Ok(proposals_result) diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index 6bc119a6e06..5b5203b9f31 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -2,7 +2,8 @@ use std::collections::HashMap; use std::hash::Hash; -use namada::ledger::pos::PosParams; +use namada::ledger::parameters::Parameters; +use namada::ledger::pos::{into_tm_voting_power, PosParams}; use namada::ledger::storage::traits::StorageHasher; use namada::ledger::storage::{DBIter, DB}; use namada::ledger::{ibc, pos}; @@ -69,7 +70,55 @@ where .expect("genesis time should be a valid timestamp") .into(); - genesis.parameters.init_storage(&mut self.storage); + // Initialize protocol parameters + let genesis::Parameters { + epoch_duration, + max_expected_time_per_block, + vp_whitelist, + tx_whitelist, + implicit_vp_code_path, + implicit_vp_sha256, + epochs_per_year, + pos_gain_p, + pos_gain_d, + staked_ratio, + pos_inflation_amount, + } = genesis.parameters; + // 
borrow necessary for release build, annoys clippy on dev build + #[allow(clippy::needless_borrow)] + let implicit_vp = + wasm_loader::read_wasm(&self.wasm_dir, &implicit_vp_code_path) + .map_err(Error::ReadingWasm)?; + // In dev, we don't check the hash + #[cfg(feature = "dev")] + let _ = implicit_vp_sha256; + #[cfg(not(feature = "dev"))] + { + let mut hasher = Sha256::new(); + hasher.update(&implicit_vp); + let vp_code_hash = hasher.finalize(); + assert_eq!( + vp_code_hash.as_slice(), + &implicit_vp_sha256, + "Invalid implicit account's VP sha256 hash for {}", + implicit_vp_code_path + ); + } + let parameters = Parameters { + epoch_duration, + max_expected_time_per_block, + vp_whitelist, + tx_whitelist, + implicit_vp, + epochs_per_year, + pos_gain_p, + pos_gain_d, + staked_ratio, + pos_inflation_amount, + }; + parameters.init_storage(&mut self.storage); + + // Initialize governance parameters genesis.gov_params.init_storage(&mut self.storage); // configure the Ethereum bridge if the configuration is set. 
if let Some(config) = genesis.ethereum_bridge_params { @@ -78,11 +127,7 @@ where // Depends on parameters being initialized self.storage - .init_genesis_epoch( - initial_height, - genesis_time, - &genesis.parameters, - ) + .init_genesis_epoch(initial_height, genesis_time, ¶meters) .expect("Initializing genesis epoch must not fail"); // Loaded VP code cache to avoid loading the same files multiple times @@ -180,7 +225,7 @@ where ) { // Initialize genesis implicit for genesis::ImplicitAccount { public_key } in accounts { - let address: address::Address = (&public_key).into(); + let address: Address = (&public_key).into(); let pk_storage_key = pk_key(&address); self.storage .write(&pk_storage_key, public_key.try_to_vec().unwrap()) @@ -289,7 +334,7 @@ where // Account balance (tokens no staked in PoS) self.storage .write( - &token::balance_key(&address::nam(), addr), + &token::balance_key(&self.storage.native_token, addr), validator .non_staked_balance .try_to_vec() @@ -344,10 +389,10 @@ where sum: Some(key_to_tendermint(&consensus_key).unwrap()), }; abci_validator.pub_key = Some(pub_key); - let power: u64 = validator.pos_data.voting_power(pos_params).into(); - abci_validator.power = power - .try_into() - .expect("unexpected validator's voting power"); + abci_validator.power = into_tm_voting_power( + pos_params.tm_votes_per_token, + validator.pos_data.tokens, + ); response.validators.push(abci_validator); } response diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 8768f5d7e47..7da37d1bc17 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -1,10 +1,10 @@ -//! The ledger shell connects the ABCI++ interface with the Anoma ledger app. +//! The ledger shell connects the ABCI++ interface with the Namada ledger app. //! //! Any changes applied before [`Shell::finalize_block`] might have to be //! reverted, so any changes applied in the methods [`Shell::prepare_proposal`] //! 
and [`Shell::process_proposal`] must be also reverted //! (unless we can simply overwrite them in the next block). -//! More info in . +//! More info in . mod finalize_block; mod governance; mod init_chain; @@ -34,8 +34,7 @@ use namada::ledger::storage::write_log::WriteLog; use namada::ledger::storage::{DBIter, Storage, DB}; use namada::ledger::{pos, protocol}; use namada::proto::{self, Tx}; -use namada::types::address; -use namada::types::address::{masp, masp_tx_key}; +use namada::types::address::{masp, masp_tx_key, Address}; use namada::types::chain::ChainId; use namada::types::ethereum_events::EthereumEvent; use namada::types::key::*; @@ -154,7 +153,7 @@ pub type Result = std::result::Result; pub fn reset(config: config::Ledger) -> Result<()> { // simply nuke the DB files let db_path = &config.db_dir(); - match std::fs::remove_dir_all(&db_path) { + match std::fs::remove_dir_all(db_path) { Err(e) if e.kind() == std::io::ErrorKind::NotFound => (), res => res.map_err(Error::RemoveDB)?, }; @@ -225,7 +224,7 @@ impl EthereumReceiver { impl ShellMode { /// Get the validator address if ledger is in validator mode - pub fn get_validator_address(&self) -> Option<&address::Address> { + pub fn get_validator_address(&self) -> Option<&Address> { match &self { ShellMode::Validator { data, .. } => Some(&data.address), _ => None, @@ -347,6 +346,7 @@ where { /// Create a new shell from a path to a database and a chain id. Looks /// up the database with this data and tries to load the last state. 
+ #[allow(clippy::too_many_arguments)] pub fn new( config: config::Ledger, wasm_dir: PathBuf, @@ -355,6 +355,7 @@ where db_cache: Option<&D::Cache>, vp_wasm_compilation_cache: u64, tx_wasm_compilation_cache: u64, + native_token: Address, ) -> Self { let chain_id = config.chain_id; let db_path = config.shell.db_dir(&chain_id); @@ -364,10 +365,11 @@ where config.shell.storage_read_past_height_limit; if !Path::new(&base_dir).is_dir() { std::fs::create_dir(&base_dir) - .expect("Creating directory for Anoma should not fail"); + .expect("Creating directory for Namada should not fail"); } // load last state from storage - let mut storage = Storage::open(db_path, chain_id.clone(), db_cache); + let mut storage = + Storage::open(db_path, chain_id.clone(), native_token, db_cache); storage .load_last_state() .map_err(|e| { @@ -700,9 +702,9 @@ where tx_bytes: &[u8], r#_type: MempoolTxType, ) -> response::CheckTx { - use namada::types::transaction::protocol::{ - ProtocolTx, ProtocolTxType, - }; + use namada::types::transaction::protocol::ProtocolTx; + #[cfg(not(feature = "abcipp"))] + use namada::types::transaction::protocol::ProtocolTxType; let mut response = response::CheckTx::default(); const VALID_MSG: &str = "Mempool validation passed"; @@ -825,12 +827,9 @@ mod test_utils { use std::ops::{Deref, DerefMut}; use std::path::PathBuf; - #[cfg(not(feature = "abcipp"))] - use namada::ledger::pos::namada_proof_of_stake::types::VotingPower; use namada::ledger::storage::mockdb::MockDB; - use namada::ledger::storage::traits::Sha256Hasher; - use namada::ledger::storage::{BlockStateWrite, MerkleTree}; - use namada::types::address::{nam, EstablishedAddressGen}; + use namada::ledger::storage::{BlockStateWrite, MerkleTree, Sha256Hasher}; + use namada::types::address::{self, EstablishedAddressGen}; use namada::types::chain::ChainId; use namada::types::hash::Hash; use namada::types::key::*; @@ -841,7 +840,6 @@ mod test_utils { use tokio::sync::mpsc::{Sender, UnboundedReceiver}; use super::*; 
- #[cfg(feature = "abciplus")] use crate::facade::tendermint_proto::abci::{ RequestInitChain, RequestProcessProposal, }; @@ -981,6 +979,7 @@ mod test_utils { None, vp_wasm_compilation_cache, tx_wasm_compilation_cache, + address::nam(), ); shell.storage.last_height = height.into(); (Self { shell }, receiver, eth_sender) @@ -1050,8 +1049,8 @@ mod test_utils { /// Get the only validator's voting power. #[inline] #[cfg(not(feature = "abcipp"))] - pub fn get_validator_voting_power() -> VotingPower { - 200.into() + pub fn get_validator_bonded_stake() -> namada::types::token::Amount { + 200_000_000_000.into() } /// Start a new test shell and initialize it. Returns the shell paired with @@ -1108,6 +1107,7 @@ mod test_utils { tokio::sync::mpsc::channel(ORACLE_CHANNEL_BUFFER_SIZE); let vp_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB let tx_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB + let native_token = address::nam(); let mut shell = Shell::::new( config::Ledger::new( base_dir.clone(), @@ -1120,6 +1120,7 @@ mod test_utils { None, vp_wasm_compilation_cache, tx_wasm_compilation_cache, + native_token.clone(), ); let keypair = gen_keypair(); // enqueue a wrapper tx @@ -1130,7 +1131,7 @@ mod test_utils { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: native_token, }, &keypair, Epoch(0), @@ -1181,6 +1182,7 @@ mod test_utils { None, vp_wasm_compilation_cache, tx_wasm_compilation_cache, + address::nam(), ); assert!(!shell.storage.tx_queue.is_empty()); } diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index da4c1adf42b..60206fec72d 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -1,11 +1,11 @@ //! 
Implementation of the [`RequestPrepareProposal`] ABCI++ method for the Shell -use namada::ledger::storage::traits::StorageHasher; -use namada::ledger::storage::{DBIter, DB}; #[cfg(feature = "abcipp")] -use namada::ledger::storage_api::queries::QueriesExt; +use namada::ledger::queries_ext::QueriesExt; #[cfg(feature = "abcipp")] -use namada::ledger::storage_api::queries::SendValsetUpd; +use namada::ledger::queries_ext::SendValsetUpd; +use namada::ledger::storage::traits::StorageHasher; +use namada::ledger::storage::{DBIter, DB}; use namada::proto::Tx; use namada::types::storage::BlockHeight; use namada::types::transaction::tx_types::TxType; @@ -13,13 +13,12 @@ use namada::types::transaction::wrapper::wrapper_tx::PairingEngine; use namada::types::transaction::{AffineCurve, DecryptedTx, EllipticCurve}; #[cfg(feature = "abcipp")] use namada::types::vote_extensions::VoteExtensionDigest; +use shims::abcipp_shim_types::shim::response; use super::super::*; -use crate::facade::tendermint_proto::abci::RequestPrepareProposal; #[cfg(feature = "abcipp")] -use crate::facade::tendermint_proto::abci::{ - tx_record::TxAction, ExtendedCommitInfo, -}; +use crate::facade::tendermint_proto::abci::ExtendedCommitInfo; +use crate::facade::tendermint_proto::abci::RequestPrepareProposal; #[cfg(not(feature = "abcipp"))] use crate::node::ledger::shell::vote_extensions::deserialize_vote_extensions; #[cfg(feature = "abcipp")] @@ -56,7 +55,7 @@ where // add ethereum events and validator set updates as protocol txs #[cfg(feature = "abcipp")] - let txs = self.build_vote_extension_txs(req.local_last_commit); + let mut txs = self.build_vote_extension_txs(req.local_last_commit); #[cfg(not(feature = "abcipp"))] let mut txs = self.build_vote_extension_txs(&req.txs); @@ -79,17 +78,7 @@ where "Proposing block" ); - #[cfg(feature = "abcipp")] - { - response::PrepareProposal { - tx_records: txs, - ..Default::default() - } - } - #[cfg(not(feature = "abcipp"))] - { - response::PrepareProposal { txs } - } + 
response::PrepareProposal { txs } } /// Builds a batch of vote extension transactions, comprised of Ethereum @@ -179,12 +168,12 @@ where } /// Builds a batch of DKG decrypted transactions - // TODO: we won't have frontrunning protection until V2 of the Anoma + // TODO: we won't have frontrunning protection until V2 of the Namada // protocol; Namada runs V1, therefore this method is // essentially a NOOP, and ought to be removed // // sources: - // - https://specs.anoma.net/main/releases/v2.html + // - https://specs.namada.net/main/releases/v2.html // - https://github.com/anoma/ferveo fn build_decrypted_txs(&mut self) -> Vec { // TODO: This should not be hardcoded @@ -221,13 +210,10 @@ mod test_prepare_proposal { use std::collections::{BTreeSet, HashMap}; use borsh::{BorshDeserialize, BorshSerialize}; - use namada::ledger::pos::namada_proof_of_stake::types::{ - VotingPower, WeightedValidator, - }; + use namada::ledger::pos::namada_proof_of_stake::types::WeightedValidator; use namada::ledger::pos::namada_proof_of_stake::PosBase; - use namada::ledger::storage_api::queries::QueriesExt; + use namada::ledger::queries_ext::QueriesExt; use namada::proto::{Signed, SignedTxData}; - use namada::types::address::nam; use namada::types::ethereum_events::EthereumEvent; #[cfg(feature = "abcipp")] use namada::types::key::common; @@ -242,7 +228,7 @@ mod test_prepare_proposal { use super::*; #[cfg(feature = "abcipp")] use crate::facade::tendermint_proto::abci::{ - tx_record::TxAction, ExtendedCommitInfo, ExtendedVoteInfo, TxRecord, + ExtendedCommitInfo, ExtendedVoteInfo, }; use crate::node::ledger::shell::test_utils::{ self, gen_keypair, TestShell, @@ -309,11 +295,7 @@ mod test_prepare_proposal { ..Default::default() }; #[cfg(feature = "abcipp")] - assert_eq!( - // NOTE: we process mempool txs after protocol txs - shell.prepare_proposal(req).tx_records.remove(1), - record::remove(non_wrapper_tx.to_bytes()) - ); + assert_eq!(shell.prepare_proposal(req).txs.len(), 1); 
#[cfg(not(feature = "abcipp"))] assert_eq!(shell.prepare_proposal(req).txs.len(), 0); } @@ -400,7 +382,7 @@ mod test_prepare_proposal { assert_eq!( filtered_votes, vec![( - test_utils::get_validator_voting_power(), + test_utils::get_validator_bonded_stake(), signed_vote_extension )] ) @@ -501,13 +483,16 @@ mod test_prepare_proposal { event: ext.data.ethereum_events[0].clone(), signers: { let mut s = BTreeSet::new(); - s.insert(ext.data.validator_addr.clone()); + s.insert((ext.data.validator_addr.clone(), last_height)); s }, }]; let signatures = { let mut s = HashMap::new(); - s.insert(ext.data.validator_addr, ext.sig.clone()); + s.insert( + (ext.data.validator_addr.clone(), last_height), + ext.sig.clone(), + ); s }; @@ -568,12 +553,9 @@ mod test_prepare_proposal { ..Default::default() }); let rsp_digest = { - assert_eq!(rsp.tx_records.len(), 1); - let tx_record = rsp.tx_records.pop().unwrap(); - - assert_eq!(tx_record.action(), TxAction::Added); - - let got = Tx::try_from(&tx_record.tx[..]).unwrap(); + assert_eq!(rsp.txs.len(), 1); + let tx_bytes = rsp.txs.remove(0); + let got = Tx::try_from(tx_bytes.as_slice()).expect("Test failed"); let got_signed_tx = SignedTxData::try_from_slice(&got.data.unwrap()[..]).unwrap(); let protocol_tx = @@ -690,7 +672,7 @@ mod test_prepare_proposal { .iter() .cloned() .map(|v| WeightedValidator { - voting_power: VotingPower::from(0u64), + bonded_stake: 0, ..v }) .collect(); @@ -734,7 +716,7 @@ mod test_prepare_proposal { #[cfg(feature = "abcipp")] { let vote_extension = VoteExtension { - ethereum_events: signed_eth_ev_vote_extension.clone(), + ethereum_events: signed_eth_ev_vote_extension, validator_set_update: None, }; let vote = ExtendedVoteInfo { @@ -802,7 +784,7 @@ mod test_prepare_proposal { WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -824,11 +806,7 @@ mod test_prepare_proposal { ..Default::default() }; #[cfg(feature = "abcipp")] - assert_eq!( - // 
NOTE: we process mempool txs after protocol txs - shell.prepare_proposal(req).tx_records.remove(1), - record::remove(wrapper) - ); + assert_eq!(shell.prepare_proposal(req).txs.len(), 1); #[cfg(not(feature = "abcipp"))] assert_eq!(shell.prepare_proposal(req).txs.len(), 0); } @@ -860,7 +838,7 @@ mod test_prepare_proposal { let wrapper_tx = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -881,50 +859,19 @@ mod test_prepare_proposal { .iter() .map(|tx| tx.data.clone().expect("Test failed")) .collect(); - #[cfg(feature = "abcipp")] - { - let received: Vec> = shell - .prepare_proposal(req) - .tx_records - .iter() - .filter_map( - |TxRecord { - tx: tx_bytes, - action, - }| { - if *action == (TxAction::Unmodified as i32) - || *action == (TxAction::Added as i32) - { - Some( - Tx::try_from(tx_bytes.as_slice()) - .expect("Test failed") - .data - .expect("Test failed"), - ) - } else { - None - } - }, - ) - .collect(); - // check that the order of the txs is correct - assert_eq!(received, expected_txs); - } - #[cfg(not(feature = "abcipp"))] - { - let received: Vec> = shell - .prepare_proposal(req) - .txs - .into_iter() - .map(|tx_bytes| { - Tx::try_from(tx_bytes.as_slice()) - .expect("Test failed") - .data - .expect("Test failed") - }) - .collect(); - // check that the order of the txs is correct - assert_eq!(received, expected_txs); - } + + let received: Vec> = shell + .prepare_proposal(req) + .txs + .into_iter() + .map(|tx_bytes| { + Tx::try_from(tx_bytes.as_slice()) + .expect("Test failed") + .data + .expect("Test failed") + }) + .collect(); + // check that the order of the txs is correct + assert_eq!(received, expected_txs); } } diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 4d8f0e61d5d..e9a92a9d6ac 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ 
-2,11 +2,7 @@ //! and [`RevertProposal`] ABCI++ methods for the Shell use data_encoding::HEXUPPER; -#[cfg(feature = "abcipp")] -use namada::ledger::pos::types::VotingPower; -use namada::ledger::storage_api::queries::QueriesExt; -#[cfg(feature = "abcipp")] -use namada::ledger::storage_api::queries::SendValsetUpd; +use namada::ledger::queries_ext::{QueriesExt, SendValsetUpd}; use namada::types::transaction::protocol::ProtocolTxType; #[cfg(feature = "abcipp")] use namada::types::voting_power::FractionalVotingPower; @@ -223,10 +219,9 @@ where /// If a vote extension is [`Some`], then it was validated properly, /// and the voting power of the validator who signed it is considered /// in the sum of the total voting power of all received vote extensions. - #[cfg(feature = "abcipp")] fn validate_vexts_in_proposal(&self, mut vote_extensions: I) -> TxResult where - I: Iterator>, + I: Iterator>, { #[cfg(feature = "abcipp")] let mut voting_power = FractionalVotingPower::default(); @@ -353,7 +348,6 @@ where .into(), }, TxType::Protocol(protocol_tx) => match protocol_tx.tx { - #[cfg(not(feature = "abcipp"))] ProtocolTxType::EthEventsVext(ext) => self .validate_eth_events_vext_and_get_it_back( ext, @@ -372,7 +366,6 @@ where vote extensions was invalid." .into(), }), - #[cfg(not(feature = "abcipp"))] ProtocolTxType::ValSetUpdateVext(ext) => self .validate_valset_upd_vext_and_get_it_back( ext, @@ -391,9 +384,11 @@ where update vote extensions was invalid." 
.into(), }), - #[cfg(feature = "abcipp")] ProtocolTxType::EthereumEvents(digest) => { - counters.eth_ev_digest_num += 1; + #[cfg(feature = "abcipp")] + { + counters.eth_ev_digest_num += 1; + } let extensions = digest.decompress(self.storage.last_height); let valid_extensions = @@ -403,7 +398,6 @@ where self.validate_vexts_in_proposal(valid_extensions) } - #[cfg(feature = "abcipp")] ProtocolTxType::ValidatorSetUpdate(digest) => { if !self.storage.can_send_validator_set_update( SendValsetUpd::AtPrevHeight, @@ -416,8 +410,10 @@ where .into(), }; } - - counters.valset_upd_digest_num += 1; + #[cfg(feature = "abcipp")] + { + counters.valset_upd_digest_num += 1; + } let extensions = digest.decompress(self.storage.last_height); @@ -546,12 +542,11 @@ mod test_process_proposal { use assert_matches::assert_matches; use borsh::BorshDeserialize; use namada::proto::SignedTxData; - use namada::types::address::nam; use namada::types::ethereum_events::EthereumEvent; use namada::types::hash::Hash; use namada::types::key::*; use namada::types::storage::Epoch; - use namada::types::token::Amount; + use namada::types::token; use namada::types::transaction::encrypted::EncryptedTx; use namada::types::transaction::{EncryptionKey, Fee}; use namada::types::vote_extensions::ethereum_events; @@ -580,13 +575,6 @@ mod test_process_proposal { ) .sign(protocol_key); ProtocolTxType::EthereumEvents(ethereum_events::VextDigest { - #[cfg(feature = "abcipp")] - signatures: { - let mut s = HashMap::new(); - s.insert(addr, ext.sig); - s - }, - #[cfg(not(feature = "abcipp"))] signatures: { let mut s = HashMap::new(); s.insert((addr, shell.storage.last_height), ext.sig); @@ -622,7 +610,10 @@ mod test_process_proposal { ethereum_events::VextDigest { signatures: { let mut s = HashMap::new(); - s.insert(validator_addr, signed_vote_extension.sig); + s.insert( + (validator_addr, shell.storage.last_height), + signed_vote_extension.sig, + ); s }, events: vec![], @@ -727,14 +718,17 @@ mod test_process_proposal { 
let vote_extension_digest = ethereum_events::VextDigest { signatures: { let mut s = HashMap::new(); - s.insert(addr.clone(), ext.sig); + s.insert( + (addr.clone(), shell.storage.last_height), + ext.sig, + ); s }, events: vec![MultiSignedEthEvent { event, signers: { let mut s = BTreeSet::new(); - s.insert(addr); + s.insert((addr, shell.storage.last_height)); s }, }], @@ -784,9 +778,6 @@ mod test_process_proposal { let vote_extension_digest = ethereum_events::VextDigest { signatures: { let mut s = HashMap::new(); - #[cfg(feature = "abcipp")] - s.insert(addr.clone(), ext.sig); - #[cfg(not(feature = "abcipp"))] s.insert((addr.clone(), INVALID_HEIGHT), ext.sig); s }, @@ -794,9 +785,6 @@ mod test_process_proposal { event, signers: { let mut s = BTreeSet::new(); - #[cfg(feature = "abcipp")] - s.insert(addr); - #[cfg(not(feature = "abcipp"))] s.insert((addr, INVALID_HEIGHT)); s }, @@ -846,9 +834,6 @@ mod test_process_proposal { let vote_extension_digest = ethereum_events::VextDigest { signatures: { let mut s = HashMap::new(); - #[cfg(feature = "abcipp")] - s.insert(addr.clone(), ext.sig); - #[cfg(not(feature = "abcipp"))] s.insert((addr.clone(), LAST_HEIGHT), ext.sig); s }, @@ -856,9 +841,6 @@ mod test_process_proposal { event, signers: { let mut s = BTreeSet::new(); - #[cfg(feature = "abcipp")] - s.insert(addr); - #[cfg(not(feature = "abcipp"))] s.insert((addr, LAST_HEIGHT)); s }, @@ -889,7 +871,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -915,7 +897,7 @@ mod test_process_proposal { { resp.clone() } else { - panic!("Test failed") + panic!("Test failed"); } }; #[cfg(not(feature = "abcipp"))] @@ -952,7 +934,7 @@ mod test_process_proposal { let mut wrapper = WrapperTx::new( Fee { amount: 100.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1004,6 +986,7 @@ mod test_process_proposal { let request = 
ProcessProposal { txs: vec![new_tx.to_bytes(), get_empty_eth_ev_digest(&shell)], }; + if let [resp, _] = shell .process_proposal(request) .expect("Test failed") @@ -1011,7 +994,7 @@ mod test_process_proposal { { resp.clone() } else { - panic!("Test failed") + panic!("Test failed"); } }; #[cfg(not(feature = "abcipp"))] @@ -1052,7 +1035,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: 1.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1074,7 +1057,7 @@ mod test_process_proposal { { resp.clone() } else { - panic!("Test failed") + panic!("Test failed"); } }; #[cfg(not(feature = "abcipp"))] @@ -1114,8 +1097,8 @@ mod test_process_proposal { ); let wrapper = WrapperTx::new( Fee { - amount: Amount::whole(1_000_100), - token: nam(), + amount: token::Amount::whole(1_000_100), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1138,7 +1121,7 @@ mod test_process_proposal { { resp.clone() } else { - panic!("Test failed") + panic!("Test failed"); } }; #[cfg(not(feature = "abcipp"))] @@ -1180,7 +1163,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: i.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1203,7 +1186,7 @@ mod test_process_proposal { { resp.clone() } else { - panic!("Test failed") + panic!("Test failed"); } }; #[cfg(not(feature = "abcipp"))] @@ -1275,7 +1258,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1300,7 +1283,7 @@ mod test_process_proposal { { resp.clone() } else { - panic!("Test failed") + panic!("Test failed"); } }; #[cfg(not(feature = "abcipp"))] @@ -1343,7 +1326,7 @@ mod test_process_proposal { let mut wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1371,7 +1354,7 @@ mod 
test_process_proposal { { resp.clone() } else { - panic!("Test failed") + panic!("Test failed"); } }; #[cfg(not(feature = "abcipp"))] @@ -1406,7 +1389,7 @@ mod test_process_proposal { let wrapper = WrapperTx { fee: Fee { amount: 0.into(), - token: nam(), + token: shell.storage.native_token.clone(), }, pk: keypair.ref_to(), epoch: Epoch(0), @@ -1432,7 +1415,7 @@ mod test_process_proposal { { resp.clone() } else { - panic!("Test failed") + panic!("Test failed"); } }; #[cfg(not(feature = "abcipp"))] @@ -1509,7 +1492,7 @@ mod test_process_proposal { { resp.clone() } else { - panic!("Test failed") + panic!("Test failed"); } }; #[cfg(not(feature = "abcipp"))] diff --git a/apps/src/lib/node/ledger/shell/queries.rs b/apps/src/lib/node/ledger/shell/queries.rs index 9217ffdc709..bd10832b8d3 100644 --- a/apps/src/lib/node/ledger/shell/queries.rs +++ b/apps/src/lib/node/ledger/shell/queries.rs @@ -1,6 +1,11 @@ //! Shell methods for querying state +use borsh::BorshDeserialize; +use ferveo_common::TendermintValidator; +use namada::ledger::pos::into_tm_voting_power; use namada::ledger::queries::{RequestCtx, ResponseQuery}; +use namada::types::key; +use namada::types::key::dkg_session_keys::DkgPublicKey; use super::*; use crate::node::ledger::response; @@ -54,14 +59,69 @@ where }, } } + + /// Lookup data about a validator from their protocol signing key + #[allow(dead_code)] + pub fn get_validator_from_protocol_pk( + &self, + pk: &common::PublicKey, + ) -> Option> { + let pk_bytes = pk + .try_to_vec() + .expect("Serializing public key should not fail"); + // get the current epoch + let (current_epoch, _) = self.storage.get_current_epoch(); + // get the PoS params + let pos_params = self.storage.read_pos_params(); + // get the active validator set + self.storage + .read_validator_set() + .get(current_epoch) + .expect("Validators for the next epoch should be known") + .active + .iter() + .find(|validator| { + let pk_key = key::protocol_pk_key(&validator.address); + match 
self.storage.read(&pk_key) { + Ok((Some(bytes), _)) => bytes == pk_bytes, + _ => false, + } + }) + .map(|validator| { + let dkg_key = + key::dkg_session_keys::dkg_pk_key(&validator.address); + let bytes = self + .storage + .read(&dkg_key) + .expect("Validator should have public dkg key") + .0 + .expect("Validator should have public dkg key"); + let dkg_publickey = + &::deserialize( + &mut bytes.as_ref(), + ) + .expect( + "DKG public key in storage should be deserializable", + ); + TendermintValidator { + power: into_tm_voting_power( + pos_params.tm_votes_per_token, + validator.bonded_stake, + ) as u64, + address: validator.address.to_string(), + public_key: dkg_publickey.into(), + } + }) + } } -// NOTE: we are testing `namada::ledger::storage_api::queries`, +// NOTE: we are testing `namada::ledger::queries_ext`, // which is not possible from `namada` since we do not have // access to the `Shell` there #[cfg(test)] +#[cfg(not(feature = "abcipp"))] mod test_queries { - use namada::ledger::storage_api::queries::{QueriesExt, SendValsetUpd}; + use namada::ledger::queries_ext::{QueriesExt, SendValsetUpd}; use namada::types::storage::Epoch; use super::*; diff --git a/apps/src/lib/node/ledger/shell/vote_extensions.rs b/apps/src/lib/node/ledger/shell/vote_extensions.rs index c2a7f132e9a..c3ba3542340 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions.rs @@ -5,7 +5,7 @@ pub mod val_set_update; #[cfg(feature = "abcipp")] use borsh::BorshDeserialize; -use namada::ledger::storage_api::queries::{QueriesExt, SendValsetUpd}; +use namada::ledger::queries_ext::{QueriesExt, SendValsetUpd}; use namada::proto::Signed; use namada::types::transaction::protocol::ProtocolTxType; #[cfg(feature = "abcipp")] @@ -225,7 +225,7 @@ where ext, self.storage.get_current_decision_height(), ) - .then(|| true) + .then_some(true) .unwrap_or_else(|| { tracing::warn!( ?req.validator_address, @@ -244,17 +244,16 @@ where req: 
&request::VerifyVoteExtension, ext: Option, ) -> bool { - self.storage - .can_send_validator_set_update(SendValsetUpd::Now) - .then(|| { - ext.and_then(|ext| { + if let Some(ext) = ext { + self.storage + .can_send_validator_set_update(SendValsetUpd::Now) + .then(|| { // we have a valset update vext when we're expecting one, // cool, let's validate it self.validate_valset_upd_vext( ext, self.storage.get_current_decision_height(), ) - .then(|| true) }) .unwrap_or_else(|| { // either validation failed, or we were expecting a valset @@ -267,13 +266,12 @@ where ); false }) - }) - .unwrap_or({ - // NOTE: if we're not supposed to send a validator set update - // vote extension at a particular block height, we will - // just return true as the validation result - true - }) + } else { + // NOTE: if we're not supposed to send a validator set update + // vote extension at a particular block height, we will + // just return true as the validation result + true + } } } diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs index 2be4a2bdf4b..9bbb259cbd2 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs @@ -2,13 +2,13 @@ use std::collections::{BTreeMap, HashMap}; -use namada::ledger::pos::namada_proof_of_stake::types::VotingPower; +use namada::ledger::queries_ext::QueriesExt; use namada::ledger::storage::traits::StorageHasher; use namada::ledger::storage::{DBIter, DB}; -use namada::ledger::storage_api::queries::QueriesExt; use namada::proto::Signed; use namada::types::ethereum_events::EthereumEvent; use namada::types::storage::BlockHeight; +use namada::types::token; use namada::types::vote_extensions::ethereum_events::{ self, MultiSignedEthEvent, }; @@ -50,7 +50,7 @@ where ext: Signed, last_height: BlockHeight, ) -> std::result::Result< - (VotingPower, Signed), + (token::Amount, Signed), VoteExtensionError, > 
{ #[cfg(feature = "abcipp")] @@ -165,7 +165,7 @@ where + 'iter, ) -> impl Iterator< Item = std::result::Result< - (VotingPower, Signed), + (token::Amount, Signed), VoteExtensionError, >, > + 'iter { @@ -184,7 +184,7 @@ where &'iter self, vote_extensions: impl IntoIterator> + 'iter, - ) -> impl Iterator)> + 'iter + ) -> impl Iterator)> + 'iter { self.validate_eth_events_vext_list(vote_extensions) .filter_map(|ext| ext.ok()) @@ -221,7 +221,6 @@ where self.filter_invalid_eth_events_vexts(vote_extensions) { let validator_addr = vote_extension.data.validator_addr; - #[cfg(not(feature = "abcipp"))] let block_height = vote_extension.data.block_height; // update voting power @@ -242,9 +241,6 @@ where for ev in vote_extension.data.ethereum_events { let signers = event_observers.entry(ev).or_insert_with(BTreeSet::new); - #[cfg(feature = "abcipp")] - signers.insert(validator_addr.clone()); - #[cfg(not(feature = "abcipp"))] signers.insert((validator_addr.clone(), block_height)); } @@ -252,16 +248,6 @@ where let addr = validator_addr.clone(); let sig = vote_extension.sig; - #[cfg(feature = "abcipp")] - if let Some(sig) = signatures.insert(addr, sig) { - tracing::warn!( - ?sig, - ?validator_addr, - "Overwrote old signature from validator while \ - constructing ethereum_events::VextDigest" - ); - } - let key = (addr, block_height); tracing::debug!( ?key, @@ -269,7 +255,6 @@ where ?validator_addr, "Inserting signature into ethereum_events::VextDigest" ); - #[cfg(not(feature = "abcipp"))] if let Some(existing_sig) = signatures.insert(key, sig.clone()) { tracing::warn!( ?sig, @@ -308,7 +293,7 @@ mod test_vote_extensions { use borsh::{BorshDeserialize, BorshSerialize}; use namada::ledger::pos; use namada::ledger::pos::namada_proof_of_stake::PosBase; - use namada::ledger::storage_api::queries::QueriesExt; + use namada::ledger::queries_ext::QueriesExt; use namada::types::ethereum_events::{ EthAddress, EthereumEvent, TransferToEthereum, }; diff --git 
a/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs index 719476a42f1..79f8f837017 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs @@ -4,11 +4,11 @@ use std::collections::HashMap; use namada::ledger::pos::namada_proof_of_stake::PosBase; -use namada::ledger::pos::types::VotingPower; +use namada::ledger::queries_ext::QueriesExt; use namada::ledger::storage::traits::StorageHasher; use namada::ledger::storage::{DBIter, DB}; -use namada::ledger::storage_api::queries::QueriesExt; use namada::types::storage::BlockHeight; +use namada::types::token; use namada::types::vote_extensions::validator_set_update; #[cfg(feature = "abcipp")] use namada::types::voting_power::FractionalVotingPower; @@ -51,7 +51,7 @@ where ext: validator_set_update::SignedVext, last_height: BlockHeight, ) -> std::result::Result< - (VotingPower, validator_set_update::SignedVext), + (token::Amount, validator_set_update::SignedVext), VoteExtensionError, > { #[cfg(feature = "abcipp")] @@ -171,7 +171,7 @@ where + 'static, ) -> impl Iterator< Item = std::result::Result< - (VotingPower, validator_set_update::SignedVext), + (token::Amount, validator_set_update::SignedVext), VoteExtensionError, >, > + '_ { @@ -190,7 +190,7 @@ where &self, vote_extensions: impl IntoIterator + 'static, - ) -> impl Iterator + '_ + ) -> impl Iterator + '_ { self.validate_valset_upd_vext_list(vote_extensions) .filter_map(|ext| ext.ok()) @@ -234,7 +234,6 @@ where } let validator_addr = vote_extension.data.validator_addr; - #[cfg(not(feature = "abcipp"))] let block_height = vote_extension.data.block_height; // update voting power @@ -255,15 +254,6 @@ where let addr = validator_addr.clone(); let sig = vote_extension.sig; - #[cfg(feature = "abcipp")] - if let Some(sig) = signatures.insert(addr, sig) { - tracing::warn!( - ?sig, - ?validator_addr, - 
"Overwrote old signature from validator while \ - constructing validator_set_update::VextDigest" - ); - } let key = (addr, block_height); tracing::debug!( ?key, @@ -271,7 +261,6 @@ where ?validator_addr, "Inserting signature into validator_set_update::VextDigest" ); - #[cfg(not(feature = "abcipp"))] if let Some(existing_sig) = signatures.insert(key, sig.clone()) { tracing::warn!( ?sig, @@ -318,7 +307,7 @@ mod test_vote_extensions { use borsh::BorshSerialize; use namada::ledger::pos; use namada::ledger::pos::namada_proof_of_stake::PosBase; - use namada::ledger::storage_api::queries::QueriesExt; + use namada::ledger::queries_ext::QueriesExt; use namada::types::key::RefTo; #[cfg(feature = "abcipp")] use namada::types::vote_extensions::ethereum_events; diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/apps/src/lib/node/ledger/shims/abcipp_shim.rs index b3e143c2cad..3f351b89b39 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim.rs @@ -5,6 +5,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; use futures::future::FutureExt; +use namada::types::address::Address; use namada::types::ethereum_events::EthereumEvent; #[cfg(not(feature = "abcipp"))] use namada::types::hash::Hash; @@ -44,6 +45,7 @@ pub struct AbcippShim { impl AbcippShim { /// Create a shell with a ABCI service that passes messages to and from the /// shell. 
+ #[allow(clippy::too_many_arguments)] pub fn new( config: config::Ledger, wasm_dir: PathBuf, @@ -52,6 +54,7 @@ impl AbcippShim { db_cache: &rocksdb::Cache, vp_wasm_compilation_cache: u64, tx_wasm_compilation_cache: u64, + native_token: Address, ) -> (Self, AbciService) { // We can use an unbounded channel here, because tower-abci limits the // the number of requests that can come in @@ -66,6 +69,7 @@ impl AbcippShim { Some(db_cache), vp_wasm_compilation_cache, tx_wasm_compilation_cache, + native_token, ), #[cfg(not(feature = "abcipp"))] begin_block_request: None, diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs b/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs index 05a4c36334a..4e6b83dbbf2 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs @@ -15,8 +15,7 @@ pub mod shim { RequestProcessProposal, RequestQuery, ResponseApplySnapshotChunk, ResponseCheckTx, ResponseCommit, ResponseEcho, ResponseFlush, ResponseInfo, ResponseInitChain, ResponseListSnapshots, - ResponseLoadSnapshotChunk, ResponseOfferSnapshot, - ResponsePrepareProposal, ResponseQuery, + ResponseLoadSnapshotChunk, ResponseOfferSnapshot, ResponseQuery, }; #[cfg(feature = "abcipp")] use crate::facade::tendermint_proto::abci::{ @@ -120,7 +119,7 @@ pub mod shim { InitChain(ResponseInitChain), Info(ResponseInfo), Query(ResponseQuery), - PrepareProposal(ResponsePrepareProposal), + PrepareProposal(response::PrepareProposal), VerifyHeader(response::VerifyHeader), ProcessProposal(response::ProcessProposal), RevertProposal(response::RevertProposal), @@ -167,7 +166,7 @@ pub mod shim { Ok(Resp::ApplySnapshotChunk(inner)) } Response::PrepareProposal(inner) => { - Ok(Resp::PrepareProposal(inner)) + Ok(Resp::PrepareProposal(inner.into())) } #[cfg(feature = "abcipp")] Response::ExtendVote(inner) => Ok(Resp::ExtendVote(inner)), @@ -263,7 +262,8 @@ pub mod shim { use namada::ledger::events::EventLevel; use 
crate::facade::tendermint_proto::abci::{ - Event as TmEvent, ResponseProcessProposal, ValidatorUpdate, + Event as TmEvent, ResponsePrepareProposal, ResponseProcessProposal, + ValidatorUpdate, }; #[cfg(not(feature = "abcipp"))] use crate::facade::tendermint_proto::types::ConsensusParams; @@ -273,6 +273,26 @@ pub mod shim { types::ConsensusParams, }; + #[derive(Debug, Default)] + pub struct PrepareProposal { + pub txs: Vec, + } + + #[cfg(feature = "abcipp")] + impl From for ResponsePrepareProposal { + fn from(_: PrepareProposal) -> Self { + // TODO(namada#198): When abci++ arrives, we should return a + // real response. + Self::default() + } + } + + #[cfg(not(feature = "abcipp"))] + impl From for ResponsePrepareProposal { + fn from(resp: PrepareProposal) -> Self { + Self { txs: resp.txs } + } + } #[derive(Debug, Default)] pub struct VerifyHeader; diff --git a/apps/src/lib/node/ledger/storage/mod.rs b/apps/src/lib/node/ledger/storage/mod.rs index 0f1e42d9ce8..4a275a15bf2 100644 --- a/apps/src/lib/node/ledger/storage/mod.rs +++ b/apps/src/lib/node/ledger/storage/mod.rs @@ -46,12 +46,13 @@ impl fmt::Debug for PersistentStorageHasher { } fn new_blake2b() -> Blake2b { - Blake2bBuilder::new(32).personal(b"anoma storage").build() + Blake2bBuilder::new(32).personal(b"namada storage").build() } #[cfg(test)] mod tests { use namada::ledger::storage::types; + use namada::types::address; use namada::types::chain::ChainId; use namada::types::storage::{BlockHash, BlockHeight, Key}; use proptest::collection::vec; @@ -65,8 +66,12 @@ mod tests { fn test_crud_value() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = - PersistentStorage::open(db_path.path(), ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); let key = Key::parse("key").expect("cannot parse the key string"); let value: u64 = 1; let value_bytes = types::encode(&value); @@ -89,7 
+94,7 @@ mod tests { assert_eq!(gas, key.len() as u64); let (result, gas) = storage.read(&key).expect("read failed"); let read_value: u64 = - types::decode(&result.expect("value doesn't exist")) + types::decode(result.expect("value doesn't exist")) .expect("decoding failed"); assert_eq!(read_value, value); assert_eq!(gas, key.len() as u64 + value_bytes_len as u64); @@ -108,8 +113,12 @@ mod tests { fn test_commit_block() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = - PersistentStorage::open(db_path.path(), ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); storage .begin_block(BlockHash::default(), BlockHeight(100)) .expect("begin_block failed"); @@ -130,8 +139,12 @@ mod tests { drop(storage); // load the last state - let mut storage = - PersistentStorage::open(db_path.path(), ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); storage .load_last_state() .expect("loading the last state failed"); @@ -149,8 +162,12 @@ mod tests { fn test_iter() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = - PersistentStorage::open(db_path.path(), ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); storage .begin_block(BlockHash::default(), BlockHeight(100)) .expect("begin_block failed"); @@ -189,8 +206,12 @@ mod tests { fn test_validity_predicate() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = - PersistentStorage::open(db_path.path(), ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); storage .begin_block(BlockHash::default(), BlockHeight(100)) 
.expect("begin_block failed"); @@ -243,8 +264,12 @@ mod tests { ) -> namada::ledger::storage::Result<()> { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = - PersistentStorage::open(db_path.path(), ChainId::default(), None); + let mut storage = PersistentStorage::open( + db_path.path(), + ChainId::default(), + address::nam(), + None, + ); // 1. For each `blocks_write_value`, write the current block height if // true or delete otherwise. diff --git a/apps/src/lib/node/ledger/storage/rocksdb.rs b/apps/src/lib/node/ledger/storage/rocksdb.rs index 60cafa37c24..e0c7581ea0f 100644 --- a/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -37,9 +37,9 @@ use namada::ledger::storage::{ types, BlockStateRead, BlockStateWrite, DBIter, DBWriteBatch, Error, MerkleTreeStoresRead, Result, StoreType, DB, }; +use namada::types::internal::TxQueue; use namada::types::storage::{ - BlockHeight, BlockResults, Header, Key, KeySeg, TxQueue, - KEY_SEGMENT_SEPARATOR, + BlockHeight, BlockResults, Header, Key, KeySeg, KEY_SEGMENT_SEPARATOR, }; use namada::types::time::DateTimeUtc; use rocksdb::{ @@ -53,7 +53,7 @@ use crate::config::utils::num_of_threads; /// Env. var to set a number of Rayon global worker threads const ENV_VAR_ROCKSDB_COMPACTION_THREADS: &str = - "ANOMA_ROCKSDB_COMPACTION_THREADS"; + "NAMADA_ROCKSDB_COMPACTION_THREADS"; /// RocksDB handle #[derive(Debug)] @@ -758,7 +758,7 @@ impl DB for RocksDB { // Write the new key-val self.0 - .put(&subspace_key.to_string(), value) + .put(subspace_key.to_string(), value) .map_err(|e| Error::DBError(e.into_string()))?; Ok(size_diff) diff --git a/apps/src/lib/node/ledger/tendermint_node.rs b/apps/src/lib/node/ledger/tendermint_node.rs index dc932f0ada4..2fdc6ce4a94 100644 --- a/apps/src/lib/node/ledger/tendermint_node.rs +++ b/apps/src/lib/node/ledger/tendermint_node.rs @@ -26,7 +26,7 @@ use crate::facade::tendermint_config::{ }; /// Env. 
var to output Tendermint log to stdout -pub const ENV_VAR_TM_STDOUT: &str = "ANOMA_TM_STDOUT"; +pub const ENV_VAR_TM_STDOUT: &str = "NAMADA_TM_STDOUT"; #[cfg(feature = "abciplus")] pub const VERSION_REQUIREMENTS: &str = ">= 0.37.0-alpha.2, <0.38.0"; @@ -49,7 +49,7 @@ async fn get_version(tendermint_path: &str) -> eyre::Result { /// Runs `tendermint version` and returns the output as a string async fn run_version_command(tendermint_path: &str) -> eyre::Result { - let output = Command::new(&tendermint_path) + let output = Command::new(tendermint_path) .arg("version") .output() .await?; @@ -160,7 +160,7 @@ pub async fn run( // init and run a tendermint node child process let output = Command::new(&tendermint_path) - .args(&["init", &mode, "--home", &home_dir_string]) + .args(["init", &mode, "--home", &home_dir_string]) .output() .await .map_err(Error::Init)?; @@ -184,7 +184,7 @@ pub async fn run( update_tendermint_config(&home_dir, config).await?; let mut tendermint_node = Command::new(&tendermint_path); - tendermint_node.args(&[ + tendermint_node.args([ "start", "--proxy_app", &ledger_address, @@ -244,7 +244,7 @@ pub fn reset(tendermint_dir: impl AsRef) -> Result<()> { let tendermint_dir = tendermint_dir.as_ref().to_string_lossy(); // reset all the Tendermint state, if any std::process::Command::new(tendermint_path) - .args(&[ + .args([ "reset-state", "unsafe-all", // NOTE: log config: https://docs.tendermint.com/master/nodes/logging.html#configuring-log-levels diff --git a/apps/src/lib/wallet/alias.rs b/apps/src/lib/wallet/alias.rs index 25fcf03d116..13d977b8524 100644 --- a/apps/src/lib/wallet/alias.rs +++ b/apps/src/lib/wallet/alias.rs @@ -97,11 +97,6 @@ pub fn validator_consensus_key(validator_alias: &Alias) -> Alias { format!("{validator_alias}-consensus-key").into() } -/// Default alias of a validator's staking rewards key -pub fn validator_rewards_key(validator_alias: &Alias) -> Alias { - format!("{validator_alias}-rewards-key").into() -} - /// Default alias 
of a validator's Tendermint node key pub fn validator_tendermint_node_key(validator_alias: &Alias) -> Alias { format!("{validator_alias}-tendermint-node-key").into() diff --git a/apps/src/lib/wallet/defaults.rs b/apps/src/lib/wallet/defaults.rs index 9fbda9b76bc..df251da5898 100644 --- a/apps/src/lib/wallet/defaults.rs +++ b/apps/src/lib/wallet/defaults.rs @@ -19,7 +19,7 @@ pub fn addresses_from_genesis(genesis: GenesisConfig) -> Vec<(Alias, Address)> { let mut addresses: Vec<(Alias, Address)> = vec![ ("pos".into(), pos::ADDRESS), ("pos_slash_pool".into(), pos::SLASH_POOL_ADDRESS), - ("governance".into(), governance::vp::ADDRESS), + ("governance".into(), governance::ADDRESS), ("eth_bridge".into(), eth_bridge::ADDRESS), ]; // Genesis validators @@ -33,16 +33,14 @@ pub fn addresses_from_genesis(genesis: GenesisConfig) -> Vec<(Alias, Address)> { }); addresses.extend(validator_addresses); // Genesis tokens - if let Some(accounts) = genesis.token { - let token_addresses = accounts.into_iter().map(|(alias, token)| { - // The address must be set in the genesis config file - ( - alias.into(), - Address::decode(token.address.unwrap()).unwrap(), - ) - }); - addresses.extend(token_addresses); - } + let token_addresses = genesis.token.into_iter().map(|(alias, token)| { + // The address must be set in the genesis config file + ( + alias.into(), + Address::decode(token.address.unwrap()).unwrap(), + ) + }); + addresses.extend(token_addresses); // Genesis established accounts if let Some(accounts) = genesis.established { let est_addresses = accounts.into_iter().map(|(alias, established)| { @@ -127,7 +125,7 @@ mod dev { let mut addresses: Vec<(Alias, Address)> = vec![ ("pos".into(), pos::ADDRESS), ("pos_slash_pool".into(), pos::SLASH_POOL_ADDRESS), - ("governance".into(), governance::vp::ADDRESS), + ("governance".into(), governance::ADDRESS), ("validator".into(), validator_address()), ("albert".into(), albert_address()), ("bertha".into(), bertha_address()), diff --git 
a/apps/src/lib/wallet/mod.rs b/apps/src/lib/wallet/mod.rs index 37d8cc3a63c..c6a806912fb 100644 --- a/apps/src/lib/wallet/mod.rs +++ b/apps/src/lib/wallet/mod.rs @@ -558,10 +558,10 @@ pub fn read_and_confirm_pwd(unsafe_dont_encrypt: bool) -> Option { /// Read the password for encryption/decryption from the file/env/stdin. Panics /// if all options are empty/invalid. pub fn read_password(prompt_msg: &str) -> String { - let pwd = match env::var("ANOMA_WALLET_PASSWORD_FILE") { + let pwd = match env::var("NAMADA_WALLET_PASSWORD_FILE") { Ok(path) => fs::read_to_string(path) .expect("Something went wrong reading the file"), - Err(_) => match env::var("ANOMA_WALLET_PASSWORD") { + Err(_) => match env::var("NAMADA_WALLET_PASSWORD") { Ok(password) => password, Err(_) => rpassword::read_password_from_tty(Some(prompt_msg)) .unwrap_or_default(), diff --git a/apps/src/lib/wallet/pre_genesis.rs b/apps/src/lib/wallet/pre_genesis.rs index 2b665d7546a..9b7572d8b8b 100644 --- a/apps/src/lib/wallet/pre_genesis.rs +++ b/apps/src/lib/wallet/pre_genesis.rs @@ -43,8 +43,6 @@ pub struct ValidatorWallet { pub eth_cold_key: common::SecretKey, /// Cryptographic keypair for eth hot key pub eth_hot_key: common::SecretKey, - /// Cryptographic keypair for rewards key - pub rewards_key: common::SecretKey, /// Cryptographic keypair for Tendermint node key pub tendermint_node_key: common::SecretKey, } @@ -59,8 +57,6 @@ pub struct ValidatorStore { pub consensus_key: wallet::StoredKeypair, /// Cryptographic keypair for eth cold key pub eth_cold_key: wallet::StoredKeypair, - /// Cryptographic keypair for rewards key - pub rewards_key: wallet::StoredKeypair, /// Cryptographic keypair for Tendermint node key pub tendermint_node_key: wallet::StoredKeypair, /// Special validator keys. Contains the ETH hot key. 
@@ -112,7 +108,6 @@ impl ValidatorWallet { let password = if store.account_key.is_encrypted() || store.consensus_key.is_encrypted() - || store.rewards_key.is_encrypted() || store.account_key.is_encrypted() { Some(wallet::read_password("Enter decryption password: ")) @@ -128,9 +123,6 @@ impl ValidatorWallet { store.eth_cold_key.get(true, password.clone())?; let eth_hot_key = store.validator_keys.eth_bridge_keypair.clone(); - - let rewards_key = - store.rewards_key.get(true, password.clone())?; let tendermint_node_key = store.tendermint_node_key.get(true, password)?; @@ -140,7 +132,6 @@ impl ValidatorWallet { consensus_key, eth_cold_key, eth_hot_key, - rewards_key, tendermint_node_key, }) } @@ -163,8 +154,6 @@ impl ValidatorWallet { ); let (eth_cold_key, eth_cold_sk) = gen_key_to_store(SchemeType::Secp256k1, &password); - - let (rewards_key, rewards_sk) = gen_key_to_store(scheme, &password); let (tendermint_node_key, tendermint_node_sk) = gen_key_to_store( // Note that TM only allows ed25519 for node IDs SchemeType::Ed25519, @@ -177,7 +166,6 @@ impl ValidatorWallet { account_key, consensus_key, eth_cold_key, - rewards_key, tendermint_node_key, validator_keys, }; @@ -187,7 +175,6 @@ impl ValidatorWallet { consensus_key: consensus_sk, eth_cold_key: eth_cold_sk, eth_hot_key, - rewards_key: rewards_sk, tendermint_node_key: tendermint_node_sk, } } diff --git a/apps/src/lib/wallet/store.rs b/apps/src/lib/wallet/store.rs index d7af231d86b..6606486f831 100644 --- a/apps/src/lib/wallet/store.rs +++ b/apps/src/lib/wallet/store.rs @@ -64,7 +64,7 @@ pub struct Store { payment_addrs: HashMap, /// Cryptographic keypairs keys: HashMap>, - /// Anoma address book + /// Namada address book addresses: BiHashMap, /// Known mappings of public key hashes to their aliases in the `keys` /// field. Used for look-up by a public key. 
@@ -435,7 +435,7 @@ impl Store { if alias.is_empty() { println!( "Empty alias given, defaulting to {}.", - alias = Into::::into(pkh.to_string()) + Into::::into(pkh.to_string()) ); } // Addresses and keypairs can share aliases, so first remove any @@ -587,10 +587,7 @@ impl Store { address: Address, ) -> Option { if alias.is_empty() { - println!( - "Empty alias given, defaulting to {}.", - alias = address.encode() - ); + println!("Empty alias given, defaulting to {}.", address.encode()); } // Addresses and keypairs can share aliases, so first remove any keys // sharing the same namesake before checking if alias has been used. @@ -645,7 +642,6 @@ impl Store { other: pre_genesis::ValidatorWallet, ) { let account_key_alias = alias::validator_key(&validator_alias); - let rewards_key_alias = alias::validator_rewards_key(&validator_alias); let consensus_key_alias = alias::validator_consensus_key(&validator_alias); let tendermint_node_key_alias = @@ -653,7 +649,6 @@ impl Store { let keys = [ (account_key_alias.clone(), other.store.account_key), - (rewards_key_alias.clone(), other.store.rewards_key), (consensus_key_alias.clone(), other.store.consensus_key), ( tendermint_node_key_alias.clone(), @@ -663,12 +658,10 @@ impl Store { self.keys.extend(keys.into_iter()); let account_pk = other.account_key.ref_to(); - let rewards_pk = other.rewards_key.ref_to(); let consensus_pk = other.consensus_key.ref_to(); let tendermint_node_pk = other.tendermint_node_key.ref_to(); let addresses = [ (account_key_alias.clone(), (&account_pk).into()), - (rewards_key_alias.clone(), (&rewards_pk).into()), (consensus_key_alias.clone(), (&consensus_pk).into()), ( tendermint_node_key_alias.clone(), @@ -679,7 +672,6 @@ impl Store { let pkhs = [ ((&account_pk).into(), account_key_alias), - ((&rewards_pk).into(), rewards_key_alias), ((&consensus_pk).into(), consensus_key_alias), ((&tendermint_node_pk).into(), tendermint_node_key_alias), ]; diff --git a/apps/src/lib/wasm_loader/mod.rs 
b/apps/src/lib/wasm_loader/mod.rs index e82bb924524..9a075fbcf88 100644 --- a/apps/src/lib/wasm_loader/mod.rs +++ b/apps/src/lib/wasm_loader/mod.rs @@ -112,7 +112,7 @@ pub async fn pre_fetch_wasm(wasm_directory: impl AsRef) { // If the checksums file doesn't exists ... if tokio::fs::canonicalize(&checksums_path).await.is_err() { tokio::fs::create_dir_all(&wasm_directory).await.unwrap(); - // ... try to copy checksums from the Anoma WASM root dir + // ... try to copy checksums from the Namada WASM root dir if tokio::fs::copy( std::env::current_dir() .unwrap() @@ -161,7 +161,7 @@ pub async fn pre_fetch_wasm(wasm_directory: impl AsRef) { ); #[cfg(feature = "dev")] { - // try to copy built file from the Anoma WASM root dir + // try to copy built file from the Namada WASM root dir if tokio::fs::copy( std::env::current_dir() .unwrap() @@ -204,7 +204,7 @@ pub async fn pre_fetch_wasm(wasm_directory: impl AsRef) { std::io::ErrorKind::NotFound => { #[cfg(feature = "dev")] { - // try to copy built file from the Anoma WASM root + // try to copy built file from the Namada WASM root // dir if tokio::fs::copy( std::env::current_dir() diff --git a/core/Cargo.toml b/core/Cargo.toml new file mode 100644 index 00000000000..689d5171e67 --- /dev/null +++ b/core/Cargo.toml @@ -0,0 +1,116 @@ +[package] +authors = ["Heliax AG "] +edition = "2021" +license = "GPL-3.0" +name = "namada_core" +resolver = "2" +version = "0.11.0" + +[features] +default = [] +ferveo-tpke = [ + "ferveo", + "tpke", + "ark-ec", + "rand_core", + "rand", +] +wasm-runtime = [ + "rayon", +] +# secp256k1 key signing and verification, disabled in WASM build by default as +# it bloats the build a lot +secp256k1-sign-verify = [ + "libsecp256k1/hmac", +] + +abcipp = [ + "ibc-proto-abcipp", + "ibc-abcipp", + "tendermint-abcipp", + "tendermint-proto-abcipp" +] +abciplus = [ + "ibc", + "ibc-proto", + "tendermint", + "tendermint-proto", +] + +ibc-mocks = [ + "ibc/mocks", +] +ibc-mocks-abcipp = [ + "ibc-abcipp/mocks", +] + +# 
for integration tests and test utilities +testing = [ + "rand", + "rand_core", + "proptest", +] + +[dependencies] +ark-bls12-381 = {version = "0.3"} +ark-ec = {version = "0.3", optional = true} +ark-serialize = {version = "0.3"} +# We switch off "blake2b" because it cannot be compiled to wasm +# branch = "bat/arse-merkle-tree" +arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "04ad1eeb28901b57a7599bbe433b3822965dabe8", default-features = false, features = ["std", "borsh"]} +bech32 = "0.8.0" +bellman = "0.11.2" +bit-vec = "0.6.3" +borsh = "0.9.0" +chrono = {version = "0.4.22", default-features = false, features = ["clock", "std"]} +data-encoding = "2.3.2" +derivative = "2.2.0" +ed25519-consensus = "1.2.0" +ethabi = "17.0.0" +eyre = "0.6.8" +ferveo = {optional = true, git = "https://github.com/anoma/ferveo"} +ferveo-common = {git = "https://github.com/anoma/ferveo"} +tpke = {package = "group-threshold-cryptography", optional = true, git = "https://github.com/anoma/ferveo"} +# TODO using the same version of tendermint-rs as we do here. 
+ibc = {version = "0.14.0", default-features = false, optional = true} +ibc-proto = {version = "0.17.1", default-features = false, optional = true} +ibc-abcipp = {package = "ibc", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} +ibc-proto-abcipp = {package = "ibc-proto", git = "https://github.com/heliaxdev/ibc-rs", rev = "9fcc1c8c19db6af50806ffe5b2f6c214adcbfd5d", default-features = false, optional = true} +ics23 = "0.7.0" +itertools = "0.10.0" +libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} +masp_primitives = { git = "https://github.com/anoma/masp", rev = "bee40fc465f6afbd10558d12fe96eb1742eee45c" } +num-rational = "0.4.1" +proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} +prost = "0.9.0" +prost-types = "0.9.0" +rand = {version = "0.8", optional = true} +rand_core = {version = "0.6", optional = true} +rayon = {version = "=1.5.3", optional = true} +rust_decimal = { version = "1.26.1", features = ["borsh"] } +rust_decimal_macros = "1.26.1" +serde = {version = "1.0.125", features = ["derive"]} +serde_json = "1.0.62" +sha2 = "0.9.3" +tendermint = {version = "0.23.6", optional = true} +tendermint-proto = {version = "0.23.6", optional = true} +tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} +tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} +thiserror = "1.0.30" +tiny-keccak = {version = "2.0.2", features = ["keccak"]} +tracing = "0.1.30" +zeroize = 
{version = "1.5.5", features = ["zeroize_derive"]} + +[dev-dependencies] +assert_matches = "1.5.0" +libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9"} +pretty_assertions = "0.7.2" +# A fork with state machine testing +proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} +rand = {version = "0.8"} +rand_core = {version = "0.6"} +test-log = {version = "0.2.7", default-features = false, features = ["trace"]} +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[build-dependencies] +tonic-build = "0.6.0" diff --git a/core/build.rs b/core/build.rs new file mode 100644 index 00000000000..c5b251c5191 --- /dev/null +++ b/core/build.rs @@ -0,0 +1,50 @@ +use std::fs::read_to_string; +use std::process::Command; +use std::{env, str}; + +/// Path to the .proto source files, relative to `core` directory +const PROTO_SRC: &str = "./proto"; + +/// The version should match the one we use in the `Makefile` +const RUSTFMT_TOOLCHAIN_SRC: &str = "../rust-nightly-version"; + +fn main() { + if let Ok(val) = env::var("COMPILE_PROTO") { + if val.to_ascii_lowercase() == "false" { + // Skip compiling proto files + return; + } + } + + // Tell Cargo that if the given file changes, to rerun this build script. + println!("cargo:rerun-if-changed={}", PROTO_SRC); + + let mut use_rustfmt = false; + + // The version should match the one we use in the `Makefile` + if let Ok(rustfmt_toolchain) = read_to_string(RUSTFMT_TOOLCHAIN_SRC) { + // Try to find the path to rustfmt. 
+ if let Ok(output) = Command::new("rustup") + .args(["which", "rustfmt", "--toolchain", rustfmt_toolchain.trim()]) + .output() + { + if let Ok(rustfmt) = str::from_utf8(&output.stdout) { + // Set the command to be used by tonic_build below to format the + // generated files + let rustfmt = rustfmt.trim(); + if !rustfmt.is_empty() { + println!("using rustfmt from path \"{}\"", rustfmt); + env::set_var("RUSTFMT", rustfmt); + use_rustfmt = true + } + } + } + } + + tonic_build::configure() + .out_dir("src/proto/generated") + .format(use_rustfmt) + .protoc_arg("--experimental_allow_proto3_optional") + .compile(&[format!("{}/types.proto", PROTO_SRC)], &[PROTO_SRC]) + .unwrap(); +} diff --git a/shared/proto b/core/proto similarity index 100% rename from shared/proto rename to core/proto diff --git a/shared/src/bytes.rs b/core/src/bytes.rs similarity index 100% rename from shared/src/bytes.rs rename to core/src/bytes.rs diff --git a/core/src/ledger/eth_bridge/mod.rs b/core/src/ledger/eth_bridge/mod.rs new file mode 100644 index 00000000000..5932c2c8d79 --- /dev/null +++ b/core/src/ledger/eth_bridge/mod.rs @@ -0,0 +1,11 @@ +//! 
Storage keys for the Ethereum bridge account + +pub mod storage; + +use crate::types::address::{Address, InternalAddress}; + +/// The [`InternalAddress`] of the Ethereum bridge account +pub const INTERNAL_ADDRESS: InternalAddress = InternalAddress::EthBridge; + +/// The [`Address`] of the Ethereum bridge account +pub const ADDRESS: Address = Address::Internal(INTERNAL_ADDRESS); diff --git a/shared/src/ledger/eth_bridge/storage/bridge_pool.rs b/core/src/ledger/eth_bridge/storage/bridge_pool.rs similarity index 100% rename from shared/src/ledger/eth_bridge/storage/bridge_pool.rs rename to core/src/ledger/eth_bridge/storage/bridge_pool.rs diff --git a/core/src/ledger/eth_bridge/storage/mod.rs b/core/src/ledger/eth_bridge/storage/mod.rs new file mode 100644 index 00000000000..60160710e1a --- /dev/null +++ b/core/src/ledger/eth_bridge/storage/mod.rs @@ -0,0 +1,109 @@ +//! Functionality for accessing the storage subspace +pub mod bridge_pool; +pub mod wrapped_erc20s; + +use super::ADDRESS; +use crate::types::address::nam; +use crate::types::storage::{DbKeySeg, Key, KeySeg}; +use crate::types::token::balance_key; + +/// Sub-key for storing the minimum confirmations parameter +pub const MIN_CONFIRMATIONS_SUBKEY: &str = "min_confirmations"; +/// Sub-key for storing the Ethereum address for wNam. +pub const NATIVE_ERC20_SUBKEY: &str = "native_erc20"; +/// Sub-key for storing the Ethereum address of the bridge contract. +pub const BRIDGE_CONTRACT_SUBKEY: &str = "bridge_contract_address"; +/// Sub-key for storing the Ethereum address of the governance contract. +pub const GOVERNANCE_CONTRACT_SUBKEY: &str = "governance_contract_address"; + +/// Key prefix for the storage subspace +pub fn prefix() -> Key { + Key::from(ADDRESS.to_db_key()) +} + +/// The key to the escrow of the VP. 
+pub fn escrow_key() -> Key { + balance_key(&nam(), &ADDRESS) +} + +/// Returns whether a key belongs to this account or not +pub fn is_eth_bridge_key(key: &Key) -> bool { + key == &escrow_key() + || matches!(key.segments.get(0), Some(first_segment) if first_segment == &ADDRESS.to_db_key()) +} + +/// Storage key for the minimum confirmations parameter. +pub fn min_confirmations_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(MIN_CONFIRMATIONS_SUBKEY.into()), + ], + } +} + +/// Storage key for the Ethereum address of wNam. +pub fn native_erc20_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(NATIVE_ERC20_SUBKEY.into()), + ], + } +} + +/// Storage key for the Ethereum address of the bridge contract. +pub fn bridge_contract_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(BRIDGE_CONTRACT_SUBKEY.into()), + ], + } +} + +/// Storage key for the Ethereum address of the governance contract. 
+pub fn governance_contract_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(GOVERNANCE_CONTRACT_SUBKEY.into()), + ], + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::types::address; + + #[test] + fn test_is_eth_bridge_key_returns_true_for_eth_bridge_address() { + let key = Key::from(super::ADDRESS.to_db_key()); + assert!(is_eth_bridge_key(&key)); + } + + #[test] + fn test_is_eth_bridge_key_returns_true_for_eth_bridge_subkey() { + let key = Key::from(super::ADDRESS.to_db_key()) + .push(&"arbitrary key segment".to_owned()) + .expect("Could not set up test"); + assert!(is_eth_bridge_key(&key)); + } + + #[test] + fn test_is_eth_bridge_key_returns_false_for_different_address() { + let key = + Key::from(address::testing::established_address_1().to_db_key()); + assert!(!is_eth_bridge_key(&key)); + } + + #[test] + fn test_is_eth_bridge_key_returns_false_for_different_address_subkey() { + let key = + Key::from(address::testing::established_address_1().to_db_key()) + .push(&"arbitrary key segment".to_owned()) + .expect("Could not set up test"); + assert!(!is_eth_bridge_key(&key)); + } +} diff --git a/shared/src/ledger/eth_bridge/storage/wrapped_erc20s.rs b/core/src/ledger/eth_bridge/storage/wrapped_erc20s.rs similarity index 100% rename from shared/src/ledger/eth_bridge/storage/wrapped_erc20s.rs rename to core/src/ledger/eth_bridge/storage/wrapped_erc20s.rs diff --git a/shared/src/ledger/gas.rs b/core/src/ledger/gas.rs similarity index 99% rename from shared/src/ledger/gas.rs rename to core/src/ledger/gas.rs index c7da7b132c1..99eb606b7b1 100644 --- a/shared/src/ledger/gas.rs +++ b/core/src/ledger/gas.rs @@ -208,7 +208,7 @@ impl VpsGas { let parallel_gas = self.rest.iter().sum::() / PARALLEL_GAS_DIVIDER; self.max .unwrap_or_default() - .checked_add(parallel_gas as u64) + .checked_add(parallel_gas) .ok_or(Error::GasOverflow) } } diff --git a/core/src/ledger/governance/mod.rs b/core/src/ledger/governance/mod.rs new 
file mode 100644 index 00000000000..8e3fb977f3d --- /dev/null +++ b/core/src/ledger/governance/mod.rs @@ -0,0 +1,11 @@ +//! Governance library code + +use crate::types::address::{Address, InternalAddress}; + +/// governance parameters +pub mod parameters; +/// governance storage +pub mod storage; + +/// The governance internal address +pub const ADDRESS: Address = Address::Internal(InternalAddress::Governance); diff --git a/shared/src/ledger/governance/parameters.rs b/core/src/ledger/governance/parameters.rs similarity index 100% rename from shared/src/ledger/governance/parameters.rs rename to core/src/ledger/governance/parameters.rs diff --git a/shared/src/ledger/governance/storage.rs b/core/src/ledger/governance/storage.rs similarity index 84% rename from shared/src/ledger/governance/storage.rs rename to core/src/ledger/governance/storage.rs index 9d2f0a4e4a3..fb4ecaf76b4 100644 --- a/shared/src/ledger/governance/storage.rs +++ b/core/src/ledger/governance/storage.rs @@ -1,4 +1,4 @@ -use super::vp::ADDRESS; +use crate::ledger::governance::ADDRESS; use crate::types::address::Address; use crate::types::storage::{DbKeySeg, Key, KeySeg}; @@ -175,121 +175,74 @@ pub fn is_end_epoch_key(key: &Key) -> bool { /// Check if key is counter key pub fn is_counter_key(key: &Key) -> bool { - match &key.segments[..] { - [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(counter)] - if addr == &ADDRESS && counter == COUNTER_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(counter)] if addr == &ADDRESS && counter == COUNTER_KEY) } /// Check if key is a proposal fund parameter key pub fn is_min_proposal_fund_key(key: &Key) -> bool { - match &key.segments[..] 
{ - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(min_funds_param), - ] if addr == &ADDRESS && min_funds_param == MIN_PROPOSAL_FUND_KEY => { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(min_funds_param), + ] if addr == &ADDRESS && min_funds_param == MIN_PROPOSAL_FUND_KEY) } /// Check if key is a proposal max content parameter key pub fn is_max_content_size_key(key: &Key) -> bool { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(max_content_size_param), - ] if addr == &ADDRESS - && max_content_size_param == MAX_PROPOSAL_CONTENT_SIZE_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(max_content_size_param), + ] if addr == &ADDRESS + && max_content_size_param == MAX_PROPOSAL_CONTENT_SIZE_KEY) } /// Check if key is a max proposal size key pub fn is_max_proposal_code_size_key(key: &Key) -> bool { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(max_content_size_param), - ] if addr == &ADDRESS - && max_content_size_param == MAX_PROPOSAL_CONTENT_SIZE_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(max_content_size_param), + ] if addr == &ADDRESS + && max_content_size_param == MAX_PROPOSAL_CONTENT_SIZE_KEY) } /// Check if key is a min proposal period param key pub fn is_min_proposal_period_key(key: &Key) -> bool { - match &key.segments[..] 
{ - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(min_proposal_period_param), - ] if addr == &ADDRESS - && min_proposal_period_param == MIN_PROPOSAL_PERIOD_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(min_proposal_period_param), + ] if addr == &ADDRESS + && min_proposal_period_param == MIN_PROPOSAL_PERIOD_KEY) } /// Check if key is a max proposal period param key pub fn is_max_proposal_period_key(key: &Key) -> bool { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(max_proposal_period_param), - ] if addr == &ADDRESS - && max_proposal_period_param == MAX_PROPOSAL_PERIOD_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(max_proposal_period_param), + ] if addr == &ADDRESS + && max_proposal_period_param == MAX_PROPOSAL_PERIOD_KEY) } /// Check if key is a min grace epoch key pub fn is_commit_proposal_key(key: &Key) -> bool { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::StringSeg(epoch_prefix), - DbKeySeg::StringSeg(_epoch), - DbKeySeg::StringSeg(_id), - ] if addr == &ADDRESS - && prefix == PROPOSAL_PREFIX - && epoch_prefix == PROPOSAL_COMMITTING_EPOCH => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::StringSeg(epoch_prefix), + DbKeySeg::StringSeg(_epoch), + DbKeySeg::StringSeg(_id), + ] if addr == &ADDRESS + && prefix == PROPOSAL_PREFIX + && epoch_prefix == PROPOSAL_COMMITTING_EPOCH + ) } /// Check if key is a commit proposal key pub fn is_min_grace_epoch_key(key: &Key) -> bool { - match &key.segments[..] 
{ - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(min_grace_epoch_param), - ] if addr == &ADDRESS - && min_grace_epoch_param == MIN_GRACE_EPOCH_KEY => - { - true - } - _ => false, - } + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(min_grace_epoch_param), + ] if addr == &ADDRESS + && min_grace_epoch_param == MIN_GRACE_EPOCH_KEY) } /// Check if key is parameter key diff --git a/shared/src/ledger/ibc/handler.rs b/core/src/ledger/ibc/actions.rs similarity index 99% rename from shared/src/ledger/ibc/handler.rs rename to core/src/ledger/ibc/actions.rs index 0c76f086eb6..4e09f269c28 100644 --- a/shared/src/ledger/ibc/handler.rs +++ b/core/src/ledger/ibc/actions.rs @@ -68,16 +68,16 @@ use crate::ibc::events::IbcEvent; #[cfg(any(feature = "ibc-mocks-abcipp", feature = "ibc-mocks"))] use crate::ibc::mock::client_state::{MockClientState, MockConsensusState}; use crate::ibc::timestamp::Timestamp; +use crate::ledger::ibc::data::{ + Error as IbcDataError, FungibleTokenPacketData, IbcMessage, PacketAck, + PacketReceipt, +}; use crate::ledger::ibc::storage; use crate::ledger::storage_api; use crate::tendermint::Time; use crate::tendermint_proto::{Error as ProtoError, Protobuf}; use crate::types::address::{Address, InternalAddress}; -use crate::types::ibc::data::{ - Error as IbcDataError, FungibleTokenPacketData, IbcMessage, PacketAck, - PacketReceipt, -}; -use crate::types::ibc::IbcEvent as AnomaIbcEvent; +use crate::types::ibc::IbcEvent as NamadaIbcEvent; use crate::types::storage::{BlockHeight, Key}; use crate::types::time::Rfc3339String; use crate::types::token::{self, Amount}; @@ -157,7 +157,7 @@ pub trait IbcActions { /// Emit an IBC event fn emit_ibc_event( &mut self, - event: AnomaIbcEvent, + event: NamadaIbcEvent, ) -> std::result::Result<(), Self::Error>; /// Transfer token @@ -939,7 +939,7 @@ pub trait IbcActions { if let Some(hash) = storage::token_hash_from_denom(&data.denom) .map_err(Error::IbcStorage)? 
{ - let denom_key = storage::ibc_denom_key(&hash); + let denom_key = storage::ibc_denom_key(hash); let denom_bytes = self.read_ibc_data(&denom_key)?.ok_or_else(|| { Error::SendingToken(format!( @@ -1339,7 +1339,7 @@ pub fn channel_counterparty( ChanCounterparty::new(port_id, Some(channel_id)) } -/// Returns Anoma commitment prefix +/// Returns Namada commitment prefix pub fn commitment_prefix() -> CommitmentPrefix { CommitmentPrefix::try_from(COMMITMENT_PREFIX.to_vec()) .expect("the conversion shouldn't fail") diff --git a/shared/src/types/ibc/data.rs b/core/src/ledger/ibc/data.rs similarity index 100% rename from shared/src/types/ibc/data.rs rename to core/src/ledger/ibc/data.rs diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs new file mode 100644 index 00000000000..f98fb2e4327 --- /dev/null +++ b/core/src/ledger/ibc/mod.rs @@ -0,0 +1,5 @@ +//! IBC library code + +pub mod actions; +pub mod data; +pub mod storage; diff --git a/shared/src/ledger/ibc/storage.rs b/core/src/ledger/ibc/storage.rs similarity index 100% rename from shared/src/ledger/ibc/storage.rs rename to core/src/ledger/ibc/storage.rs diff --git a/core/src/ledger/mod.rs b/core/src/ledger/mod.rs new file mode 100644 index 00000000000..9a929cccd32 --- /dev/null +++ b/core/src/ledger/mod.rs @@ -0,0 +1,13 @@ +//! The ledger modules + +pub mod eth_bridge; +pub mod gas; +pub mod governance; +#[cfg(any(feature = "abciplus", feature = "abcipp"))] +pub mod ibc; +pub mod parameters; +pub mod slash_fund; +pub mod storage; +pub mod storage_api; +pub mod tx_env; +pub mod vp_env; diff --git a/core/src/ledger/parameters/mod.rs b/core/src/ledger/parameters/mod.rs new file mode 100644 index 00000000000..cb84bd56e73 --- /dev/null +++ b/core/src/ledger/parameters/mod.rs @@ -0,0 +1,491 @@ +//! 
Protocol parameters +pub mod storage; + +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use rust_decimal::Decimal; +use thiserror::Error; + +use super::storage::types::{decode, encode}; +use super::storage::{types, Storage}; +use crate::ledger::storage::{self as ledger_storage}; +use crate::types::address::{Address, InternalAddress}; +use crate::types::storage::Key; +use crate::types::time::DurationSecs; + +const ADDRESS: Address = Address::Internal(InternalAddress::Parameters); + +/// Protocol parameters +#[derive( + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + BorshSerialize, + BorshDeserialize, + BorshSchema, +)] +pub struct Parameters { + /// Epoch duration (read only) + pub epoch_duration: EpochDuration, + /// Maximum expected time per block (read only) + pub max_expected_time_per_block: DurationSecs, + /// Whitelisted validity predicate hashes (read only) + pub vp_whitelist: Vec, + /// Whitelisted tx hashes (read only) + pub tx_whitelist: Vec, + /// Implicit accounts validity predicate WASM code + pub implicit_vp: Vec, + /// Expected number of epochs per year (read only) + pub epochs_per_year: u64, + /// PoS gain p (read only) + pub pos_gain_p: Decimal, + /// PoS gain d (read only) + pub pos_gain_d: Decimal, + /// PoS staked ratio (read + write for every epoch) + pub staked_ratio: Decimal, + /// PoS inflation amount from the last epoch (read + write for every epoch) + pub pos_inflation_amount: u64, +} + +/// Epoch duration. A new epoch begins as soon as both the `min_num_of_blocks` +/// and `min_duration` have passed since the beginning of the current epoch. 
+#[derive( + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + BorshSerialize, + BorshDeserialize, + BorshSchema, +)] +pub struct EpochDuration { + /// Minimum number of blocks in an epoch + pub min_num_of_blocks: u64, + /// Minimum duration of an epoch + pub min_duration: DurationSecs, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ReadError { + #[error("Storage error: {0}")] + StorageError(ledger_storage::Error), + #[error("Storage type error: {0}")] + StorageTypeError(types::Error), + #[error("Protocol parameters are missing, they must be always set")] + ParametersMissing, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum WriteError { + #[error("Storage error: {0}")] + StorageError(ledger_storage::Error), + #[error("Serialize error: {0}")] + SerializeError(String), +} + +impl Parameters { + /// Initialize parameters in storage in the genesis block. + pub fn init_storage(&self, storage: &mut Storage) + where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, + { + let Self { + epoch_duration, + max_expected_time_per_block, + vp_whitelist, + tx_whitelist, + implicit_vp, + epochs_per_year, + pos_gain_p, + pos_gain_d, + staked_ratio, + pos_inflation_amount, + } = self; + + // write epoch parameters + let epoch_key = storage::get_epoch_duration_storage_key(); + let epoch_value = encode(epoch_duration); + storage.write(&epoch_key, epoch_value).expect( + "Epoch parameters must be initialized in the genesis block", + ); + + // write vp whitelist parameter + let vp_whitelist_key = storage::get_vp_whitelist_storage_key(); + let vp_whitelist_value = encode(&vp_whitelist); + storage.write(&vp_whitelist_key, vp_whitelist_value).expect( + "Vp whitelist parameter must be initialized in the genesis block", + ); + + // write tx whitelist parameter + let tx_whitelist_key = storage::get_tx_whitelist_storage_key(); + let tx_whitelist_value = encode(&tx_whitelist); + 
storage.write(&tx_whitelist_key, tx_whitelist_value).expect( + "Tx whitelist parameter must be initialized in the genesis block", + ); + + // write max_expected_time_per_block parameter + let max_expected_time_per_block_key = + storage::get_max_expected_time_per_block_key(); + let max_expected_time_per_block_value = + encode(&max_expected_time_per_block); + storage + .write( + &max_expected_time_per_block_key, + max_expected_time_per_block_value, + ) + .expect( + "Max expected time per block parameter must be initialized in \ + the genesis block", + ); + + // write implicit vp parameter + let implicit_vp_key = storage::get_implicit_vp_key(); + storage.write(&implicit_vp_key, implicit_vp).expect( + "Implicit VP parameter must be initialized in the genesis block", + ); + + let epochs_per_year_key = storage::get_epochs_per_year_key(); + let epochs_per_year_value = encode(epochs_per_year); + storage + .write(&epochs_per_year_key, epochs_per_year_value) + .expect( + "Epochs per year parameter must be initialized in the genesis \ + block", + ); + + let pos_gain_p_key = storage::get_pos_gain_p_key(); + let pos_gain_p_value = encode(pos_gain_p); + storage.write(&pos_gain_p_key, pos_gain_p_value).expect( + "PoS P-gain parameter must be initialized in the genesis block", + ); + + let pos_gain_d_key = storage::get_pos_gain_d_key(); + let pos_gain_d_value = encode(pos_gain_d); + storage.write(&pos_gain_d_key, pos_gain_d_value).expect( + "PoS D-gain parameter must be initialized in the genesis block", + ); + + let staked_ratio_key = storage::get_staked_ratio_key(); + let staked_ratio_val = encode(staked_ratio); + storage.write(&staked_ratio_key, staked_ratio_val).expect( + "PoS staked ratio parameter must be initialized in the genesis \ + block", + ); + + let pos_inflation_key = storage::get_pos_inflation_amount_key(); + let pos_inflation_val = encode(pos_inflation_amount); + storage.write(&pos_inflation_key, pos_inflation_val).expect( + "PoS inflation rate parameter must be initialized in
the genesis \ + block", + ); + } +} +/// Update the max_expected_time_per_block parameter in storage. Returns the +/// parameters and gas cost. +pub fn update_max_expected_time_per_block_parameter( + storage: &mut Storage, + value: &DurationSecs, +) -> std::result::Result +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_max_expected_time_per_block_key(); + update(storage, value, key) +} + +/// Update the vp whitelist parameter in storage. Returns the parameters and gas +/// cost. +pub fn update_vp_whitelist_parameter( + storage: &mut Storage, + value: Vec, +) -> std::result::Result +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_vp_whitelist_storage_key(); + update(storage, &value, key) +} + +/// Update the tx whitelist parameter in storage. Returns the parameters and gas +/// cost. +pub fn update_tx_whitelist_parameter( + storage: &mut Storage, + value: Vec, +) -> std::result::Result +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_tx_whitelist_storage_key(); + update(storage, &value, key) +} + +/// Update the epoch parameter in storage. Returns the parameters and gas +/// cost. +pub fn update_epoch_parameter( + storage: &mut Storage, + value: &EpochDuration, +) -> std::result::Result +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_epoch_duration_storage_key(); + update(storage, value, key) +} + +/// Update the epochs_per_year parameter in storage. Returns the parameters and +/// gas cost. 
+pub fn update_epochs_per_year_parameter( + storage: &mut Storage, + value: &EpochDuration, +) -> std::result::Result +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_epochs_per_year_key(); + update(storage, value, key) +} + +/// Update the PoS P-gain parameter in storage. Returns the parameters and gas +/// cost. +pub fn update_pos_gain_p_parameter( + storage: &mut Storage, + value: &EpochDuration, +) -> std::result::Result +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_pos_gain_p_key(); + update(storage, value, key) +} + +/// Update the PoS D-gain parameter in storage. Returns the parameters and gas +/// cost. +pub fn update_pos_gain_d_parameter( + storage: &mut Storage, + value: &EpochDuration, +) -> std::result::Result +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_pos_gain_d_key(); + update(storage, value, key) +} + +/// Update the PoS staked ratio parameter in storage. Returns the parameters and +/// gas cost. +pub fn update_staked_ratio_parameter( + storage: &mut Storage, + value: &EpochDuration, +) -> std::result::Result +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_staked_ratio_key(); + update(storage, value, key) +} + +/// Update the PoS inflation rate parameter in storage. Returns the parameters +/// and gas cost. +pub fn update_pos_inflation_amount_parameter( + storage: &mut Storage, + value: &EpochDuration, +) -> std::result::Result +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + let key = storage::get_pos_inflation_amount_key(); + update(storage, value, key) +} + +/// Update the implicit VP parameter in storage. 
Return the gas cost.
+pub fn update_implicit_vp(
+ storage: &mut Storage,
+ implicit_vp: &[u8],
+) -> std::result::Result
+where
+ DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>,
+ H: ledger_storage::StorageHasher,
+{
+ let key = storage::get_implicit_vp_key();
+ // Not using `fn update` here, because implicit_vp doesn't need to be
+ // encoded, it's bytes already.
+ let (gas, _size_diff) = storage
+ .write(&key, implicit_vp)
+ .map_err(WriteError::StorageError)?;
+ Ok(gas)
+}
+
+/// Update the parameters in storage. Returns the parameters and gas
+/// cost.
+pub fn update(
+ storage: &mut Storage,
+ value: &T,
+ key: Key,
+) -> std::result::Result
+where
+ DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>,
+ H: ledger_storage::StorageHasher,
+ T: BorshSerialize,
+{
+ let serialized_value = value
+ .try_to_vec()
+ .map_err(|e| WriteError::SerializeError(e.to_string()))?;
+ let (gas, _size_diff) = storage
+ .write(&key, serialized_value)
+ .map_err(WriteError::StorageError)?;
+ Ok(gas)
+}
+
+/// Read the epoch duration parameter from storage
+pub fn read_epoch_duration_parameter(
+ storage: &Storage,
+) -> std::result::Result<(EpochDuration, u64), ReadError>
+where
+ DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>,
+ H: ledger_storage::StorageHasher,
+{
+ // read epoch
+ let epoch_key = storage::get_epoch_duration_storage_key();
+ let (value, gas) =
+ storage.read(&epoch_key).map_err(ReadError::StorageError)?;
+ let epoch_duration: EpochDuration =
+ decode(value.ok_or(ReadError::ParametersMissing)?)
+ .map_err(ReadError::StorageTypeError)?;
+
+ Ok((epoch_duration, gas))
+}
+
+/// Read all the parameters from storage. Returns the parameters and gas
+/// cost. 
+pub fn read( + storage: &Storage, +) -> std::result::Result<(Parameters, u64), ReadError> +where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, + H: ledger_storage::StorageHasher, +{ + // read epoch duration + let (epoch_duration, gas_epoch) = read_epoch_duration_parameter(storage) + .expect("Couldn't read epoch duration parameters"); + + // read vp whitelist + let vp_whitelist_key = storage::get_vp_whitelist_storage_key(); + let (value, gas_vp) = storage + .read(&vp_whitelist_key) + .map_err(ReadError::StorageError)?; + let vp_whitelist: Vec = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + // read tx whitelist + let tx_whitelist_key = storage::get_tx_whitelist_storage_key(); + let (value, gas_tx) = storage + .read(&tx_whitelist_key) + .map_err(ReadError::StorageError)?; + let tx_whitelist: Vec = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + let max_expected_time_per_block_key = + storage::get_max_expected_time_per_block_key(); + let (value, gas_time) = storage + .read(&max_expected_time_per_block_key) + .map_err(ReadError::StorageError)?; + let max_expected_time_per_block: DurationSecs = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + let implicit_vp_key = storage::get_implicit_vp_key(); + let (value, gas_implicit_vp) = storage + .read(&implicit_vp_key) + .map_err(ReadError::StorageError)?; + let implicit_vp = value.ok_or(ReadError::ParametersMissing)?; + + // read epochs per year + let epochs_per_year_key = storage::get_epochs_per_year_key(); + let (value, gas_epy) = storage + .read(&epochs_per_year_key) + .map_err(ReadError::StorageError)?; + let epochs_per_year: u64 = + decode(value.ok_or(ReadError::ParametersMissing)?) 
+ .map_err(ReadError::StorageTypeError)?; + + // read PoS gain P + let pos_gain_p_key = storage::get_pos_gain_p_key(); + let (value, gas_gain_p) = storage + .read(&pos_gain_p_key) + .map_err(ReadError::StorageError)?; + let pos_gain_p: Decimal = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + // read PoS gain D + let pos_gain_d_key = storage::get_pos_gain_d_key(); + let (value, gas_gain_d) = storage + .read(&pos_gain_d_key) + .map_err(ReadError::StorageError)?; + let pos_gain_d: Decimal = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + // read staked ratio + let staked_ratio_key = storage::get_staked_ratio_key(); + let (value, gas_staked) = storage + .read(&staked_ratio_key) + .map_err(ReadError::StorageError)?; + let staked_ratio: Decimal = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + // read PoS inflation rate + let pos_inflation_key = storage::get_pos_inflation_amount_key(); + let (value, gas_reward) = storage + .read(&pos_inflation_key) + .map_err(ReadError::StorageError)?; + let pos_inflation_amount: u64 = + decode(value.ok_or(ReadError::ParametersMissing)?) + .map_err(ReadError::StorageTypeError)?; + + Ok(( + Parameters { + epoch_duration, + max_expected_time_per_block, + vp_whitelist, + tx_whitelist, + implicit_vp, + epochs_per_year, + pos_gain_p, + pos_gain_d, + staked_ratio, + pos_inflation_amount, + }, + gas_epoch + + gas_tx + + gas_vp + + gas_time + + gas_implicit_vp + + gas_epy + + gas_gain_p + + gas_gain_d + + gas_staked + + gas_reward, + )) +} diff --git a/core/src/ledger/parameters/storage.rs b/core/src/ledger/parameters/storage.rs new file mode 100644 index 00000000000..b8dc84fd765 --- /dev/null +++ b/core/src/ledger/parameters/storage.rs @@ -0,0 +1,207 @@ +//! 
Parameters storage +use super::ADDRESS; +use crate::types::storage::{DbKeySeg, Key}; + +const EPOCH_DURATION_KEY: &str = "epoch_duration"; +const VP_WHITELIST_KEY: &str = "vp_whitelist"; +const TX_WHITELIST_KEY: &str = "tx_whitelist"; +const MAX_EXPECTED_TIME_PER_BLOCK_KEY: &str = "max_expected_time_per_block"; +const IMPLICIT_VP_KEY: &str = "implicit_vp"; +const EPOCHS_PER_YEAR_KEY: &str = "epochs_per_year"; +const POS_GAIN_P_KEY: &str = "pos_gain_p"; +const POS_GAIN_D_KEY: &str = "pos_gain_d"; +const STAKED_RATIO_KEY: &str = "staked_ratio_key"; +const POS_INFLATION_AMOUNT_KEY: &str = "pos_inflation_amount_key"; + +/// Returns if the key is a parameter key. +pub fn is_parameter_key(key: &Key) -> bool { + matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS) +} + +/// Returns if the key is a protocol parameter key. +pub fn is_protocol_parameter_key(key: &Key) -> bool { + is_epoch_duration_storage_key(key) + || is_max_expected_time_per_block_key(key) + || is_tx_whitelist_key(key) + || is_vp_whitelist_key(key) +} + +/// Returns if the key is an epoch storage key. +pub fn is_epoch_duration_storage_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(epoch_duration), + ] if addr == &ADDRESS && epoch_duration == EPOCH_DURATION_KEY) +} + +/// Returns if the key is the max_expected_time_per_block key. +pub fn is_max_expected_time_per_block_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(max_expected_time_per_block), + ] if addr == &ADDRESS && max_expected_time_per_block == MAX_EXPECTED_TIME_PER_BLOCK_KEY) +} + +/// Returns if the key is the tx_whitelist key. +pub fn is_tx_whitelist_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(tx_whitelist), + ] if addr == &ADDRESS && tx_whitelist == TX_WHITELIST_KEY) +} + +/// Returns if the key is the vp_whitelist key. 
+pub fn is_vp_whitelist_key(key: &Key) -> bool {
+ matches!(&key.segments[..], [
+ DbKeySeg::AddressSeg(addr),
+ DbKeySeg::StringSeg(vp_whitelist),
+ ] if addr == &ADDRESS && vp_whitelist == VP_WHITELIST_KEY)
+}
+
+/// Returns if the key is the implicit VP key.
+pub fn is_implicit_vp_key(key: &Key) -> bool {
+ matches!(&key.segments[..], [
+ DbKeySeg::AddressSeg(addr),
+ DbKeySeg::StringSeg(sub_key),
+ ] if addr == &ADDRESS && sub_key == IMPLICIT_VP_KEY)
+}
+
+/// Returns if the key is the epochs_per_year key.
+pub fn is_epochs_per_year_key(key: &Key) -> bool {
+ matches!(&key.segments[..], [
+ DbKeySeg::AddressSeg(addr),
+ DbKeySeg::StringSeg(epochs_per_year),
+ ] if addr == &ADDRESS && epochs_per_year == EPOCHS_PER_YEAR_KEY)
+}
+
+/// Returns if the key is the pos_gain_p key.
+pub fn is_pos_gain_p_key(key: &Key) -> bool {
+ matches!(&key.segments[..], [
+ DbKeySeg::AddressSeg(addr),
+ DbKeySeg::StringSeg(pos_gain_p),
+ ] if addr == &ADDRESS && pos_gain_p == POS_GAIN_P_KEY)
+}
+
+/// Returns if the key is the pos_gain_d key.
+pub fn is_pos_gain_d_key(key: &Key) -> bool {
+ matches!(&key.segments[..], [
+ DbKeySeg::AddressSeg(addr),
+ DbKeySeg::StringSeg(pos_gain_d),
+ ] if addr == &ADDRESS && pos_gain_d == POS_GAIN_D_KEY)
+}
+
+/// Returns if the key is the staked ratio key.
+pub fn is_staked_ratio_key(key: &Key) -> bool {
+ matches!(&key.segments[..], [
+ DbKeySeg::AddressSeg(addr),
+ DbKeySeg::StringSeg(staked_ratio),
+ ] if addr == &ADDRESS && staked_ratio == STAKED_RATIO_KEY)
+}
+
+/// Returns if the key is the PoS reward rate key.
+pub fn is_pos_inflation_amount_key(key: &Key) -> bool {
+ matches!(&key.segments[..], [
+ DbKeySeg::AddressSeg(addr),
+ DbKeySeg::StringSeg(pos_inflation_amount),
+ ] if addr == &ADDRESS && pos_inflation_amount == POS_INFLATION_AMOUNT_KEY)
+}
+
+/// Storage key used for epoch parameter. 
+pub fn get_epoch_duration_storage_key() -> Key {
+ Key {
+ segments: vec![
+ DbKeySeg::AddressSeg(ADDRESS),
+ DbKeySeg::StringSeg(EPOCH_DURATION_KEY.to_string()),
+ ],
+ }
+}
+
+/// Storage key used for vp whitelist parameter.
+pub fn get_vp_whitelist_storage_key() -> Key {
+ Key {
+ segments: vec![
+ DbKeySeg::AddressSeg(ADDRESS),
+ DbKeySeg::StringSeg(VP_WHITELIST_KEY.to_string()),
+ ],
+ }
+}
+
+/// Storage key used for tx whitelist parameter.
+pub fn get_tx_whitelist_storage_key() -> Key {
+ Key {
+ segments: vec![
+ DbKeySeg::AddressSeg(ADDRESS),
+ DbKeySeg::StringSeg(TX_WHITELIST_KEY.to_string()),
+ ],
+ }
+}
+
+/// Storage key used for max_expected_time_per_block parameter.
+pub fn get_max_expected_time_per_block_key() -> Key {
+ Key {
+ segments: vec![
+ DbKeySeg::AddressSeg(ADDRESS),
+ DbKeySeg::StringSeg(MAX_EXPECTED_TIME_PER_BLOCK_KEY.to_string()),
+ ],
+ }
+}
+
+/// Storage key used for implicit VP parameter.
+pub fn get_implicit_vp_key() -> Key {
+ Key {
+ segments: vec![
+ DbKeySeg::AddressSeg(ADDRESS),
+ DbKeySeg::StringSeg(IMPLICIT_VP_KEY.to_string()),
+ ],
+ }
+}
+
+/// Storage key used for epochs_per_year parameter.
+pub fn get_epochs_per_year_key() -> Key {
+ Key {
+ segments: vec![
+ DbKeySeg::AddressSeg(ADDRESS),
+ DbKeySeg::StringSeg(EPOCHS_PER_YEAR_KEY.to_string()),
+ ],
+ }
+}
+
+/// Storage key used for pos_gain_p parameter.
+pub fn get_pos_gain_p_key() -> Key {
+ Key {
+ segments: vec![
+ DbKeySeg::AddressSeg(ADDRESS),
+ DbKeySeg::StringSeg(POS_GAIN_P_KEY.to_string()),
+ ],
+ }
+}
+
+/// Storage key used for pos_gain_d parameter.
+pub fn get_pos_gain_d_key() -> Key {
+ Key {
+ segments: vec![
+ DbKeySeg::AddressSeg(ADDRESS),
+ DbKeySeg::StringSeg(POS_GAIN_D_KEY.to_string()),
+ ],
+ }
+}
+
+/// Storage key used for staked ratio parameter. 
+pub fn get_staked_ratio_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(STAKED_RATIO_KEY.to_string()), + ], + } +} + +/// Storage key used for the inflation amount parameter. +pub fn get_pos_inflation_amount_key() -> Key { + Key { + segments: vec![ + DbKeySeg::AddressSeg(ADDRESS), + DbKeySeg::StringSeg(POS_INFLATION_AMOUNT_KEY.to_string()), + ], + } +} diff --git a/core/src/ledger/slash_fund/mod.rs b/core/src/ledger/slash_fund/mod.rs new file mode 100644 index 00000000000..7a7d53963b0 --- /dev/null +++ b/core/src/ledger/slash_fund/mod.rs @@ -0,0 +1,8 @@ +//! SlashFund library code + +use crate::types::address::{Address, InternalAddress}; + +/// Internal SlashFund address +pub const ADDRESS: Address = Address::Internal(InternalAddress::SlashFund); + +pub mod storage; diff --git a/shared/src/ledger/slash_fund/storage.rs b/core/src/ledger/slash_fund/storage.rs similarity index 80% rename from shared/src/ledger/slash_fund/storage.rs rename to core/src/ledger/slash_fund/storage.rs index 60d29f0f480..9c437da591c 100644 --- a/shared/src/ledger/slash_fund/storage.rs +++ b/core/src/ledger/slash_fund/storage.rs @@ -1,7 +1,8 @@ -use super::ADDRESS; +//! 
Slash fund storage + use crate::types::storage::{DbKeySeg, Key}; /// Check if a key is a slash fund key pub fn is_slash_fund_key(key: &Key) -> bool { - matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS) + matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &super::ADDRESS) } diff --git a/shared/src/ledger/storage/ics23_specs.rs b/core/src/ledger/storage/ics23_specs.rs similarity index 100% rename from shared/src/ledger/storage/ics23_specs.rs rename to core/src/ledger/storage/ics23_specs.rs diff --git a/shared/src/ledger/storage/merkle_tree.rs b/core/src/ledger/storage/merkle_tree.rs similarity index 94% rename from shared/src/ledger/storage/merkle_tree.rs rename to core/src/ledger/storage/merkle_tree.rs index edd4d2b4521..d5f9163880a 100644 --- a/shared/src/ledger/storage/merkle_tree.rs +++ b/core/src/ledger/storage/merkle_tree.rs @@ -10,23 +10,21 @@ use arse_merkle_tree::{ use borsh::{BorshDeserialize, BorshSerialize}; use ics23::commitment_proof::Proof as Ics23Proof; use ics23::{CommitmentProof, ExistenceProof, NonExistenceProof}; -use prost::Message; use thiserror::Error; use super::traits::{StorageHasher, SubTreeRead, SubTreeWrite}; -use super::IBC_KEY_LIMIT; use crate::bytes::ByteBuf; use crate::ledger::eth_bridge::storage::bridge_pool::{ get_signed_root_key, BridgePoolTree, }; use crate::ledger::storage::ics23_specs::ibc_leaf_spec; use crate::ledger::storage::{ics23_specs, types}; -use crate::tendermint::merkle::proof::{Proof, ProofOp}; use crate::types::address::{Address, InternalAddress}; use crate::types::hash::Hash; use crate::types::keccak::KeccakHash; use crate::types::storage::{ - DbKeySeg, Error as StorageError, Key, MembershipProof, StringKey, TreeBytes, + self, DbKeySeg, Error as StorageError, Key, MembershipProof, StringKey, + TreeBytes, TreeKeyError, IBC_KEY_LIMIT, }; #[allow(missing_docs)] @@ -36,6 +34,8 @@ pub enum Error { InvalidKey(StorageError), #[error("Invalid key for merkle tree: {0}")] 
InvalidMerkleKey(String), + #[error("Storage tree key error: {0}")] + StorageTreeKey(#[from] TreeKeyError), #[error("Empty Key: {0}")] EmptyKey(String), #[error("Merkle Tree error: {0}")] @@ -59,10 +59,15 @@ type Result = std::result::Result; pub(super) type StorageBytes<'a> = &'a [u8]; /// Type aliases for the different merkle trees and backing stores +/// Sparse-merkle-tree store pub type SmtStore = DefaultStore; +/// Arse-merkle-tree store pub type AmtStore = DefaultStore; +/// Bridge pool store pub type BridgePoolStore = std::collections::BTreeSet; +/// Sparse-merkle-tree pub type Smt = ArseMerkleTree; +/// Arse-merkle-tree pub type Amt = ArseMerkleTree; @@ -106,6 +111,7 @@ pub enum Store { } impl Store { + /// Convert to a `StoreRef` with borrowed store pub fn as_ref(&self) -> StoreRef { match self { Self::Base(store) => StoreRef::Base(store), @@ -432,25 +438,15 @@ impl MerkleTree { } // Get a proof of the sub tree - self.get_tendermint_proof(key, nep) + self.get_sub_tree_proof(key, nep) } /// Get the Tendermint proof with the base proof - pub fn get_tendermint_proof( + pub fn get_sub_tree_proof( &self, key: &Key, sub_proof: CommitmentProof, ) -> Result { - let mut data = vec![]; - sub_proof - .encode(&mut data) - .expect("Encoding proof shouldn't fail"); - let sub_proof_op = ProofOp { - field_type: "ics23_CommitmentProof".to_string(), - key: key.to_string().as_bytes().to_vec(), - data, - }; - // Get a membership proof of the base tree because the sub root should // exist let (store_type, _) = StoreType::sub_key(key)?; @@ -469,19 +465,10 @@ impl MerkleTree { _ => unreachable!(), }; - let mut data = vec![]; - base_proof - .encode(&mut data) - .expect("Encoding proof shouldn't fail"); - let base_proof_op = ProofOp { - field_type: "ics23_CommitmentProof".to_string(), - key: key.to_string().as_bytes().to_vec(), - data, - }; - - // Set ProofOps from leaf to root Ok(Proof { - ops: vec![sub_proof_op, base_proof_op], + key: key.clone(), + sub_proof, + base_proof, }) } 
} @@ -598,6 +585,57 @@ impl From for Error { } } +/// A storage key existence or non-existence proof +#[derive(Debug)] +pub struct Proof { + /// Storage key + pub key: storage::Key, + /// Sub proof + pub sub_proof: CommitmentProof, + /// Base proof + pub base_proof: CommitmentProof, +} + +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl From for crate::tendermint::merkle::proof::Proof { + fn from( + Proof { + key, + sub_proof, + base_proof, + }: Proof, + ) -> Self { + use prost::Message; + + use crate::tendermint::merkle::proof::{Proof, ProofOp}; + + let mut data = vec![]; + sub_proof + .encode(&mut data) + .expect("Encoding proof shouldn't fail"); + let sub_proof_op = ProofOp { + field_type: "ics23_CommitmentProof".to_string(), + key: key.to_string().as_bytes().to_vec(), + data, + }; + + let mut data = vec![]; + base_proof + .encode(&mut data) + .expect("Encoding proof shouldn't fail"); + let base_proof_op = ProofOp { + field_type: "ics23_CommitmentProof".to_string(), + key: key.to_string().as_bytes().to_vec(), + data, + }; + + // Set ProofOps from leaf to root + Proof { + ops: vec![sub_proof_op, base_proof_op], + } + } +} + #[cfg(test)] mod test { use super::*; @@ -642,9 +680,7 @@ mod test { let nep = tree .get_non_existence_proof(&ibc_non_key) .expect("Test failed"); - let subtree_nep = nep.ops.get(0).expect("Test failed"); - let nep_commitment_proof = - CommitmentProof::decode(&*subtree_nep.data).expect("Test failed"); + let nep_commitment_proof = nep.sub_proof; let non_existence_proof = match nep_commitment_proof.clone().proof.expect("Test failed") { Ics23Proof::Nonexist(nep) => nep, @@ -668,9 +704,7 @@ mod test { sub_key.to_string().as_bytes(), ); assert!(nep_verification_res); - let basetree_ep = nep.ops.get(1).unwrap(); - let basetree_ep_commitment_proof = - CommitmentProof::decode(&*basetree_ep.data).unwrap(); + let basetree_ep_commitment_proof = nep.base_proof; let basetree_ics23_ep = match 
basetree_ep_commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, @@ -740,17 +774,19 @@ mod test { MembershipProof::ICS23(proof) => proof, _ => panic!("Test failed"), }; - let proof = tree.get_tendermint_proof(&ibc_key, proof).unwrap(); + let proof = tree.get_sub_tree_proof(&ibc_key, proof).unwrap(); let (store_type, sub_key) = StoreType::sub_key(&ibc_key).unwrap(); let paths = vec![sub_key.to_string(), store_type.to_string()]; let mut sub_root = ibc_val.clone(); let mut value = ibc_val; // First, the sub proof is verified. Next the base proof is verified // with the sub root - for ((p, spec), key) in - proof.ops.iter().zip(specs.iter()).zip(paths.iter()) + for ((commitment_proof, spec), key) in + [proof.sub_proof, proof.base_proof] + .into_iter() + .zip(specs.iter()) + .zip(paths.iter()) { - let commitment_proof = CommitmentProof::decode(&*p.data).unwrap(); let existence_proof = match commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, @@ -800,17 +836,19 @@ mod test { _ => panic!("Test failed"), }; - let proof = tree.get_tendermint_proof(&pos_key, proof).unwrap(); + let proof = tree.get_sub_tree_proof(&pos_key, proof).unwrap(); let (store_type, sub_key) = StoreType::sub_key(&pos_key).unwrap(); let paths = vec![sub_key.to_string(), store_type.to_string()]; let mut sub_root = pos_val.clone(); let mut value = pos_val; // First, the sub proof is verified. 
Next the base proof is verified // with the sub root - for ((p, spec), key) in - proof.ops.iter().zip(specs.iter()).zip(paths.iter()) + for ((commitment_proof, spec), key) in + [proof.sub_proof, proof.base_proof] + .into_iter() + .zip(specs.iter()) + .zip(paths.iter()) { - let commitment_proof = CommitmentProof::decode(&*p.data).unwrap(); let existence_proof = match commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, @@ -850,9 +888,7 @@ mod test { let nep = tree .get_non_existence_proof(&ibc_non_key) .expect("Test failed"); - let subtree_nep = nep.ops.get(0).expect("Test failed"); - let nep_commitment_proof = - CommitmentProof::decode(&*subtree_nep.data).expect("Test failed"); + let nep_commitment_proof = nep.sub_proof; let non_existence_proof = match nep_commitment_proof.clone().proof.expect("Test failed") { Ics23Proof::Nonexist(nep) => nep, @@ -876,9 +912,7 @@ mod test { sub_key.to_string().as_bytes(), ); assert!(nep_verification_res); - let basetree_ep = nep.ops.get(1).unwrap(); - let basetree_ep_commitment_proof = - CommitmentProof::decode(&*basetree_ep.data).unwrap(); + let basetree_ep_commitment_proof = nep.base_proof; let basetree_ics23_ep = match basetree_ep_commitment_proof.clone().proof.unwrap() { Ics23Proof::Exist(ep) => ep, diff --git a/shared/src/ledger/storage/mockdb.rs b/core/src/ledger/storage/mockdb.rs similarity index 99% rename from shared/src/ledger/storage/mockdb.rs rename to core/src/ledger/storage/mockdb.rs index 5f4e583c082..950084acc80 100644 --- a/shared/src/ledger/storage/mockdb.rs +++ b/core/src/ledger/storage/mockdb.rs @@ -14,7 +14,7 @@ use super::{ }; use crate::ledger::storage::types::{self, KVBytes, PrefixIterator}; #[cfg(feature = "ferveo-tpke")] -use crate::types::storage::TxQueue; +use crate::types::internal::TxQueue; use crate::types::storage::{ BlockHeight, BlockResults, Header, Key, KeySeg, KEY_SEGMENT_SEPARATOR, }; diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs new file mode 
100644 index 00000000000..55e8ced657c --- /dev/null +++ b/core/src/ledger/storage/mod.rs @@ -0,0 +1,1387 @@ +//! Ledger's state storage with key-value backed store and a merkle tree + +pub mod ics23_specs; +pub mod merkle_tree; +#[cfg(any(test, feature = "testing"))] +pub mod mockdb; +pub mod traits; +pub mod types; + +use core::fmt::Debug; +use std::collections::BTreeMap; + +use borsh::{BorshDeserialize, BorshSerialize}; +use masp_primitives::asset_type::AssetType; +use masp_primitives::convert::AllowedConversion; +use masp_primitives::merkle_tree::FrozenCommitmentTree; +use masp_primitives::sapling::Node; +pub use merkle_tree::{ + MerkleTree, MerkleTreeStoresRead, MerkleTreeStoresWrite, StoreType, +}; +#[cfg(feature = "wasm-runtime")] +use rayon::iter::{ + IndexedParallelIterator, IntoParallelIterator, ParallelIterator, +}; +#[cfg(feature = "wasm-runtime")] +use rayon::prelude::ParallelSlice; +use thiserror::Error; +pub use traits::{Sha256Hasher, StorageHasher}; + +use crate::ledger::gas::MIN_STORAGE_GAS; +use crate::ledger::parameters::{self, EpochDuration, Parameters}; +use crate::ledger::storage::merkle_tree::{ + Error as MerkleTreeError, MerkleRoot, +}; +use crate::ledger::storage_api; +use crate::ledger::storage_api::{ResultExt, StorageRead, StorageWrite}; +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +use crate::tendermint::merkle::proof::Proof; +use crate::types::address::{ + masp, Address, EstablishedAddressGen, InternalAddress, +}; +use crate::types::chain::{ChainId, CHAIN_ID_LENGTH}; +// TODO +#[cfg(feature = "ferveo-tpke")] +use crate::types::internal::TxQueue; +use crate::types::storage::{ + BlockHash, BlockHeight, BlockResults, Epoch, Epochs, Header, Key, KeySeg, + TxIndex, BLOCK_HASH_LENGTH, +}; +use crate::types::time::DateTimeUtc; +use crate::types::token; + +/// A result of a function that may fail +pub type Result = std::result::Result; +/// A representation of the conversion state +#[derive(Debug, Default, BorshSerialize, 
BorshDeserialize)] +pub struct ConversionState { + /// The merkle root from the previous epoch + pub prev_root: Node, + /// The tree currently containing all the conversions + pub tree: FrozenCommitmentTree, + /// Map assets to their latest conversion and position in Merkle tree + pub assets: BTreeMap, +} + +/// The storage data +#[derive(Debug)] +pub struct Storage +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// The database for the storage + pub db: D, + /// The ID of the chain + pub chain_id: ChainId, + /// The address of the native token - this is not stored in DB, but read + /// from genesis + pub native_token: Address, + /// Block storage data + pub block: BlockStorage, + /// During `FinalizeBlock`, this is the header of the block that is + /// going to be committed. After a block is committed, this is reset to + /// `None` until the next `FinalizeBlock` phase is reached. + pub header: Option
, + /// The height of the most recently committed block, or `BlockHeight(0)` if + /// no block has been committed for this chain yet. + pub last_height: BlockHeight, + /// The epoch of the most recently committed block. If it is `Epoch(0)`, + /// then no block may have been committed for this chain yet. + pub last_epoch: Epoch, + /// Minimum block height at which the next epoch may start + pub next_epoch_min_start_height: BlockHeight, + /// Minimum block time at which the next epoch may start + pub next_epoch_min_start_time: DateTimeUtc, + /// The current established address generator + pub address_gen: EstablishedAddressGen, + /// The shielded transaction index + pub tx_index: TxIndex, + /// The currently saved conversion state + pub conversion_state: ConversionState, + /// Wrapper txs to be decrypted in the next block proposal + #[cfg(feature = "ferveo-tpke")] + pub tx_queue: TxQueue, +} + +/// The block storage data +#[derive(Debug)] +pub struct BlockStorage { + /// Merkle tree of all the other data in block storage + pub tree: MerkleTree, + /// During `FinalizeBlock`, this is updated to be the hash of the block + /// that is going to be committed. If it is `BlockHash::default()`, + /// then no `FinalizeBlock` stage has been reached yet. + pub hash: BlockHash, + /// From the start of `FinalizeBlock` until the end of `Commit`, this is + /// height of the block that is going to be committed. Otherwise, it is the + /// height of the most recently committed block, or `BlockHeight(0)` if no + /// block has been committed yet. + pub height: BlockHeight, + /// From the start of `FinalizeBlock` until the end of `Commit`, this is + /// height of the block that is going to be committed. Otherwise it is the + /// epoch of the most recently committed block, or `Epoch(0)` if no block + /// has been committed yet. 
+ pub epoch: Epoch, + /// Results of applying transactions + pub results: BlockResults, + /// Predecessor block epochs + pub pred_epochs: Epochs, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("TEMPORARY error: {error}")] + Temporary { error: String }, + #[error("Found an unknown key: {key}")] + UnknownKey { key: String }, + #[error("Storage key error {0}")] + KeyError(crate::types::storage::Error), + #[error("Coding error: {0}")] + CodingError(types::Error), + #[error("Merkle tree error: {0}")] + MerkleTreeError(MerkleTreeError), + #[error("DB error: {0}")] + DBError(String), + #[error("Borsh (de)-serialization error: {0}")] + BorshCodingError(std::io::Error), + #[error("Merkle tree at the height {height} is not stored")] + NoMerkleTree { height: BlockHeight }, +} + +/// The block's state as stored in the database. +pub struct BlockStateRead { + /// Merkle tree stores + pub merkle_tree_stores: MerkleTreeStoresRead, + /// Hash of the block + pub hash: BlockHash, + /// Height of the block + pub height: BlockHeight, + /// Epoch of the block + pub epoch: Epoch, + /// Predecessor block epochs + pub pred_epochs: Epochs, + /// Minimum block height at which the next epoch may start + pub next_epoch_min_start_height: BlockHeight, + /// Minimum block time at which the next epoch may start + pub next_epoch_min_start_time: DateTimeUtc, + /// Established address generator + pub address_gen: EstablishedAddressGen, + /// Results of applying transactions + pub results: BlockResults, + /// Wrapper txs to be decrypted in the next block proposal + #[cfg(feature = "ferveo-tpke")] + pub tx_queue: TxQueue, +} + +/// The block's state to write into the database. 
+pub struct BlockStateWrite<'a> { + /// Merkle tree stores + pub merkle_tree_stores: MerkleTreeStoresWrite<'a>, + /// Header of the block + pub header: Option<&'a Header>, + /// Hash of the block + pub hash: &'a BlockHash, + /// Height of the block + pub height: BlockHeight, + /// Epoch of the block + pub epoch: Epoch, + /// Predecessor block epochs + pub pred_epochs: &'a Epochs, + /// Minimum block height at which the next epoch may start + pub next_epoch_min_start_height: BlockHeight, + /// Minimum block time at which the next epoch may start + pub next_epoch_min_start_time: DateTimeUtc, + /// Established address generator + pub address_gen: &'a EstablishedAddressGen, + /// Results of applying transactions + pub results: &'a BlockResults, + /// Wrapper txs to be decrypted in the next block proposal + #[cfg(feature = "ferveo-tpke")] + pub tx_queue: &'a TxQueue, +} + +/// A database backend. +pub trait DB: std::fmt::Debug { + /// A DB's cache + type Cache; + /// A handle for batch writes + type WriteBatch: DBWriteBatch; + + /// Open the database from provided path + fn open( + db_path: impl AsRef, + cache: Option<&Self::Cache>, + ) -> Self; + + /// Flush data on the memory to persistent them + fn flush(&self, wait: bool) -> Result<()>; + + /// Read the last committed block's metadata + fn read_last_block(&mut self) -> Result>; + + /// Write block's metadata + fn write_block(&mut self, state: BlockStateWrite) -> Result<()>; + + /// Read the block header with the given height from the DB + fn read_block_header(&self, height: BlockHeight) -> Result>; + + /// Read the merkle tree stores with the given height + fn read_merkle_tree_stores( + &self, + height: BlockHeight, + ) -> Result>; + + /// Read the latest value for account subspace key from the DB + fn read_subspace_val(&self, key: &Key) -> Result>>; + + /// Read the value for account subspace key at the given height from the DB. 
+ /// In our `PersistentStorage` (rocksdb), to find a value from arbitrary + /// height requires looking for diffs from the given `height`, possibly + /// up to the `last_height`. + fn read_subspace_val_with_height( + &self, + key: &Key, + height: BlockHeight, + last_height: BlockHeight, + ) -> Result>>; + + /// Write the value with the given height and account subspace key to the + /// DB. Returns the size difference from previous value, if any, or the + /// size of the value otherwise. + fn write_subspace_val( + &mut self, + height: BlockHeight, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result; + + /// Delete the value with the given height and account subspace key from the + /// DB. Returns the size of the removed value, if any, 0 if no previous + /// value was found. + fn delete_subspace_val( + &mut self, + height: BlockHeight, + key: &Key, + ) -> Result; + + /// Start write batch. + fn batch() -> Self::WriteBatch; + + /// Execute write batch. + fn exec_batch(&mut self, batch: Self::WriteBatch) -> Result<()>; + + /// Batch write the value with the given height and account subspace key to + /// the DB. Returns the size difference from previous value, if any, or + /// the size of the value otherwise. + fn batch_write_subspace_val( + &self, + batch: &mut Self::WriteBatch, + height: BlockHeight, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result; + + /// Batch delete the value with the given height and account subspace key + /// from the DB. Returns the size of the removed value, if any, 0 if no + /// previous value was found. + fn batch_delete_subspace_val( + &self, + batch: &mut Self::WriteBatch, + height: BlockHeight, + key: &Key, + ) -> Result; +} + +/// A database prefix iterator. +pub trait DBIter<'iter> { + /// The concrete type of the iterator + type PrefixIter: Debug + Iterator, u64)>; + + /// Read account subspace key value pairs with the given prefix from the DB, + /// ordered by the storage keys. 
+ fn iter_prefix(&'iter self, prefix: &Key) -> Self::PrefixIter; + + /// Read account subspace key value pairs with the given prefix from the DB, + /// reverse ordered by the storage keys. + fn rev_iter_prefix(&'iter self, prefix: &Key) -> Self::PrefixIter; + + /// Read results subspace key value pairs from the DB + fn iter_results(&'iter self) -> Self::PrefixIter; +} + +/// Atomic batch write. +pub trait DBWriteBatch { + /// Insert a value into the database under the given key. + fn put(&mut self, key: K, value: V) + where + K: AsRef<[u8]>, + V: AsRef<[u8]>; + + /// Removes the database entry for key. Does nothing if the key was not + /// found. + fn delete>(&mut self, key: K); +} + +impl Storage +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// open up a new instance of the storage given path to db and chain id + pub fn open( + db_path: impl AsRef, + chain_id: ChainId, + native_token: Address, + cache: Option<&D::Cache>, + ) -> Self { + let block = BlockStorage { + tree: MerkleTree::default(), + hash: BlockHash::default(), + height: BlockHeight::default(), + epoch: Epoch::default(), + pred_epochs: Epochs::default(), + results: BlockResults::default(), + }; + Storage:: { + db: D::open(db_path, cache), + chain_id, + block, + header: None, + last_height: BlockHeight(0), + last_epoch: Epoch::default(), + next_epoch_min_start_height: BlockHeight::default(), + next_epoch_min_start_time: DateTimeUtc::now(), + address_gen: EstablishedAddressGen::new( + "Privacy is a function of liberty.", + ), + tx_index: TxIndex::default(), + conversion_state: ConversionState::default(), + #[cfg(feature = "ferveo-tpke")] + tx_queue: TxQueue::default(), + native_token, + } + } + + /// Load the full state at the last committed height, if any. Returns the + /// Merkle root hash and the height of the committed block. 
+ pub fn load_last_state(&mut self) -> Result<()> { + if let Some(BlockStateRead { + merkle_tree_stores, + hash, + height, + epoch, + pred_epochs, + next_epoch_min_start_height, + next_epoch_min_start_time, + results, + address_gen, + #[cfg(feature = "ferveo-tpke")] + tx_queue, + }) = self.db.read_last_block()? + { + self.block.tree = MerkleTree::new(merkle_tree_stores); + self.block.hash = hash; + self.block.height = height; + self.block.epoch = epoch; + self.block.results = results; + self.block.pred_epochs = pred_epochs; + self.last_height = height; + self.last_epoch = epoch; + self.next_epoch_min_start_height = next_epoch_min_start_height; + self.next_epoch_min_start_time = next_epoch_min_start_time; + self.address_gen = address_gen; + if self.last_epoch.0 > 1 { + // The derived conversions will be placed in MASP address space + let masp_addr = masp(); + let key_prefix: Key = masp_addr.to_db_key().into(); + // Load up the conversions currently being given as query + // results + let state_key = key_prefix + .push(&(token::CONVERSION_KEY_PREFIX.to_owned())) + .map_err(Error::KeyError)?; + self.conversion_state = types::decode( + self.read(&state_key) + .expect("unable to read conversion state") + .0 + .expect("unable to find conversion state"), + ) + .expect("unable to decode conversion state") + } + #[cfg(feature = "ferveo-tpke")] + { + self.tx_queue = tx_queue; + } + tracing::debug!("Loaded storage from DB"); + } else { + tracing::info!("No state could be found"); + } + Ok(()) + } + + /// Returns the Merkle root hash and the height of the committed block. If + /// no block exists, returns None. 
+ pub fn get_state(&self) -> Option<(MerkleRoot, u64)> { + if self.block.height.0 != 0 { + Some((self.block.tree.root(), self.block.height.0)) + } else { + None + } + } + + /// Persist the current block's state to the database + pub fn commit(&mut self) -> Result<()> { + let state = BlockStateWrite { + merkle_tree_stores: self.block.tree.stores(), + header: self.header.as_ref(), + hash: &self.block.hash, + height: self.block.height, + epoch: self.block.epoch, + results: &self.block.results, + pred_epochs: &self.block.pred_epochs, + next_epoch_min_start_height: self.next_epoch_min_start_height, + next_epoch_min_start_time: self.next_epoch_min_start_time, + address_gen: &self.address_gen, + #[cfg(feature = "ferveo-tpke")] + tx_queue: &self.tx_queue, + }; + self.db.write_block(state)?; + self.last_height = self.block.height; + self.last_epoch = self.block.epoch; + self.header = None; + Ok(()) + } + + /// Find the root hash of the merkle tree + pub fn merkle_root(&self) -> MerkleRoot { + self.block.tree.root() + } + + /// Check if the given key is present in storage. Returns the result and the + /// gas cost. + pub fn has_key(&self, key: &Key) -> Result<(bool, u64)> { + Ok((self.block.tree.has_key(key)?, key.len() as _)) + } + + /// Returns a value from the specified subspace and the gas cost + pub fn read(&self, key: &Key) -> Result<(Option>, u64)> { + tracing::debug!("storage read key {}", key); + let (present, gas) = self.has_key(key)?; + if !present { + return Ok((None, gas)); + } + + match self.db.read_subspace_val(key)? 
{ + Some(v) => { + let gas = key.len() + v.len(); + Ok((Some(v), gas as _)) + } + None => Ok((None, key.len() as _)), + } + } + + /// Returns a value from the specified subspace at the given height and the + /// gas cost + pub fn read_with_height( + &self, + key: &Key, + height: BlockHeight, + ) -> Result<(Option>, u64)> { + if height >= self.last_height { + self.read(key) + } else { + match self.db.read_subspace_val_with_height( + key, + height, + self.last_height, + )? { + Some(v) => { + let gas = key.len() + v.len(); + Ok((Some(v), gas as _)) + } + None => Ok((None, key.len() as _)), + } + } + } + + /// Returns a prefix iterator, ordered by storage keys, and the gas cost + pub fn iter_prefix( + &self, + prefix: &Key, + ) -> (>::PrefixIter, u64) { + (self.db.iter_prefix(prefix), prefix.len() as _) + } + + /// Returns a prefix iterator, reverse ordered by storage keys, and the gas + /// cost + pub fn rev_iter_prefix( + &self, + prefix: &Key, + ) -> (>::PrefixIter, u64) { + (self.db.rev_iter_prefix(prefix), prefix.len() as _) + } + + /// Returns a prefix iterator and the gas cost + pub fn iter_results(&self) -> (>::PrefixIter, u64) { + (self.db.iter_results(), 0) + } + + /// Write a value to the specified subspace and returns the gas cost and the + /// size difference + pub fn write( + &mut self, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<(u64, i64)> { + // Note that this method is the same as `StorageWrite::write_bytes`, + // but with gas and storage bytes len diff accounting + tracing::debug!("storage write key {}", key,); + let value = value.as_ref(); + self.block.tree.update(key, value)?; + + let len = value.len(); + let gas = key.len() + len; + let size_diff = + self.db.write_subspace_val(self.block.height, key, value)?; + Ok((gas as _, size_diff)) + } + + /// Delete the specified subspace and returns the gas cost and the size + /// difference + pub fn delete(&mut self, key: &Key) -> Result<(u64, i64)> { + // Note that this method is the same as 
`StorageWrite::delete`, + // but with gas and storage bytes len diff accounting + let mut deleted_bytes_len = 0; + if self.has_key(key)?.0 { + self.block.tree.delete(key)?; + deleted_bytes_len = + self.db.delete_subspace_val(self.block.height, key)?; + } + let gas = key.len() + deleted_bytes_len as usize; + Ok((gas as _, deleted_bytes_len)) + } + + /// Set the block header. + /// The header is not in the Merkle tree as it's tracked by Tendermint. + /// Hence, we don't update the tree when this is set. + pub fn set_header(&mut self, header: Header) -> Result<()> { + self.header = Some(header); + Ok(()) + } + + /// Block data is in the Merkle tree as it's tracked by Tendermint in the + /// block header. Hence, we don't update the tree when this is set. + pub fn begin_block( + &mut self, + hash: BlockHash, + height: BlockHeight, + ) -> Result<()> { + self.block.hash = hash; + self.block.height = height; + Ok(()) + } + + /// Get a validity predicate for the given account address and the gas cost + /// for reading it. + pub fn validity_predicate( + &self, + addr: &Address, + ) -> Result<(Option>, u64)> { + let key = if let Address::Implicit(_) = addr { + parameters::storage::get_implicit_vp_key() + } else { + Key::validity_predicate(addr) + }; + self.read(&key) + } + + #[allow(dead_code)] + /// Check if the given address exists on chain and return the gas cost. + pub fn exists(&self, addr: &Address) -> Result<(bool, u64)> { + let key = Key::validity_predicate(addr); + self.has_key(&key) + } + + /// Get the chain ID as a raw string + pub fn get_chain_id(&self) -> (String, u64) { + (self.chain_id.to_string(), CHAIN_ID_LENGTH as _) + } + + /// Get the block height + pub fn get_block_height(&self) -> (BlockHeight, u64) { + (self.block.height, MIN_STORAGE_GAS) + } + + /// Get the block hash + pub fn get_block_hash(&self) -> (BlockHash, u64) { + (self.block.hash.clone(), BLOCK_HASH_LENGTH as _) + } + + /// Get a Tendermint-compatible existence proof. 
+ /// + /// Proofs from the Ethereum bridge pool are not + /// Tendermint-compatible. Requesting for a key + /// belonging to the bridge pool will cause this + /// method to error. + pub fn get_existence_proof( + &self, + key: &Key, + value: Vec, + height: BlockHeight, + ) -> Result { + use std::array; + + use crate::types::storage::MembershipProof; + + if height >= self.get_block_height().0 { + if let MembershipProof::ICS23(proof) = self + .block + .tree + .get_sub_tree_existence_proof( + array::from_ref(key), + vec![&value], + ) + .map_err(Error::MerkleTreeError)? + { + self.block + .tree + .get_sub_tree_proof(key, proof) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) + } + } else { + match self.db.read_merkle_tree_stores(height)? { + Some(stores) => { + let tree = MerkleTree::::new(stores); + if let MembershipProof::ICS23(proof) = tree + .get_sub_tree_existence_proof( + array::from_ref(key), + vec![&value], + ) + .map_err(Error::MerkleTreeError)? + { + tree.get_sub_tree_proof(key, proof) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + Err(Error::MerkleTreeError( + MerkleTreeError::TendermintProof, + )) + } + } + None => Err(Error::NoMerkleTree { height }), + } + } + } + + /// Get the non-existence proof + pub fn get_non_existence_proof( + &self, + key: &Key, + height: BlockHeight, + ) -> Result { + if height >= self.last_height { + self.block + .tree + .get_non_existence_proof(key) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + match self.db.read_merkle_tree_stores(height)? 
{ + Some(stores) => MerkleTree::::new(stores) + .get_non_existence_proof(key) + .map(Into::into) + .map_err(Error::MerkleTreeError), + None => Err(Error::NoMerkleTree { height }), + } + } + } + + /// Get the current (yet to be committed) block epoch + pub fn get_current_epoch(&self) -> (Epoch, u64) { + (self.block.epoch, MIN_STORAGE_GAS) + } + + /// Get the epoch of the last committed block + pub fn get_last_epoch(&self) -> (Epoch, u64) { + (self.last_epoch, MIN_STORAGE_GAS) + } + + /// Initialize the first epoch. The first epoch begins at genesis time. + pub fn init_genesis_epoch( + &mut self, + initial_height: BlockHeight, + genesis_time: DateTimeUtc, + parameters: &Parameters, + ) -> Result<()> { + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.next_epoch_min_start_height = initial_height + min_num_of_blocks; + self.next_epoch_min_start_time = genesis_time + min_duration; + self.update_epoch_in_merkle_tree() + } + + /// Get the block header + pub fn get_block_header( + &self, + height: Option, + ) -> Result<(Option
, u64)> { + match height { + Some(h) if h == self.get_block_height().0 => { + Ok((self.header.clone(), MIN_STORAGE_GAS)) + } + Some(h) => match self.db.read_block_header(h)? { + Some(header) => { + let gas = header.encoded_len() as u64; + Ok((Some(header), gas)) + } + None => Ok((None, MIN_STORAGE_GAS)), + }, + None => Ok((self.header.clone(), MIN_STORAGE_GAS)), + } + } + + /// Initialize a new epoch when the current epoch is finished. Returns + /// `true` on a new epoch. + #[cfg(feature = "wasm-runtime")] + pub fn update_epoch( + &mut self, + height: BlockHeight, + time: DateTimeUtc, + ) -> Result { + let (parameters, _gas) = + parameters::read(self).expect("Couldn't read protocol parameters"); + + // Check if the current epoch is over + let new_epoch = height >= self.next_epoch_min_start_height + && time >= self.next_epoch_min_start_time; + if new_epoch { + // Begin a new epoch + self.block.epoch = self.block.epoch.next(); + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.next_epoch_min_start_height = height + min_num_of_blocks; + self.next_epoch_min_start_time = time + min_duration; + // TODO put this into PoS parameters and pass it to tendermint + // `consensus_params` on `InitChain` and `EndBlock` + let evidence_max_age_num_blocks: u64 = 100000; + self.block + .pred_epochs + .new_epoch(height, evidence_max_age_num_blocks); + tracing::info!("Began a new epoch {}", self.block.epoch); + self.update_allowed_conversions()?; + } + self.update_epoch_in_merkle_tree()?; + Ok(new_epoch) + } + + /// Get the current conversions + pub fn get_conversion_state(&self) -> &ConversionState { + &self.conversion_state + } + + // Construct MASP asset type with given timestamp for given token + #[cfg(feature = "wasm-runtime")] + fn encode_asset_type(addr: Address, epoch: Epoch) -> AssetType { + let new_asset_bytes = (addr, epoch.0) + .try_to_vec() + .expect("unable to serialize address and epoch"); + 
AssetType::new(new_asset_bytes.as_ref()) + .expect("unable to derive asset identifier") + } + + #[cfg(feature = "wasm-runtime")] + /// Update the MASP's allowed conversions + fn update_allowed_conversions(&mut self) -> Result<()> { + use masp_primitives::ff::PrimeField; + use masp_primitives::transaction::components::Amount as MaspAmount; + + use crate::types::address::{masp_rewards, nam}; + + // The derived conversions will be placed in MASP address space + let masp_addr = masp(); + let key_prefix: Key = masp_addr.to_db_key().into(); + + let masp_rewards = masp_rewards(); + // The total transparent value of the rewards being distributed + let mut total_reward = token::Amount::from(0); + + // Construct MASP asset type for rewards. Always timestamp reward tokens + // with the zeroth epoch to minimize the number of convert notes clients + // have to use. This trick works under the assumption that reward tokens + // from different epochs are exactly equivalent. + let reward_asset_bytes = (nam(), 0u64) + .try_to_vec() + .expect("unable to serialize address and epoch"); + let reward_asset = AssetType::new(reward_asset_bytes.as_ref()) + .expect("unable to derive asset identifier"); + // Conversions from the previous to current asset for each address + let mut current_convs = BTreeMap::::new(); + // Reward all tokens according to above reward rates + for (addr, reward) in &masp_rewards { + // Dispence a transparent reward in parallel to the shielded rewards + let token_key = self.read(&token::balance_key(addr, &masp_addr)); + if let Ok((Some(addr_balance), _)) = token_key { + // The reward for each reward.1 units of the current asset is + // reward.0 units of the reward token + let addr_bal: token::Amount = + types::decode(addr_balance).expect("invalid balance"); + // Since floor(a) + floor(b) <= floor(a+b), there will always be + // enough rewards to reimburse users + total_reward += (addr_bal * *reward).0; + } + // Provide an allowed conversion from previous timestamp. 
The + // negative sign allows each instance of the old asset to be + // cancelled out/replaced with the new asset + let old_asset = + Self::encode_asset_type(addr.clone(), self.last_epoch); + let new_asset = + Self::encode_asset_type(addr.clone(), self.block.epoch); + current_convs.insert( + addr.clone(), + (MaspAmount::from_pair(old_asset, -(reward.1 as i64)).unwrap() + + MaspAmount::from_pair(new_asset, reward.1).unwrap() + + MaspAmount::from_pair(reward_asset, reward.0).unwrap()) + .into(), + ); + // Add a conversion from the previous asset type + self.conversion_state.assets.insert( + old_asset, + (addr.clone(), self.last_epoch, MaspAmount::zero().into(), 0), + ); + } + + // Try to distribute Merkle leaf updating as evenly as possible across + // multiple cores + let num_threads = rayon::current_num_threads(); + // Put assets into vector to enable computation batching + let assets: Vec<_> = self + .conversion_state + .assets + .values_mut() + .enumerate() + .collect(); + // ceil(assets.len() / num_threads) + let notes_per_thread_max = (assets.len() - 1) / num_threads + 1; + // floor(assets.len() / num_threads) + let notes_per_thread_min = assets.len() / num_threads; + // Now on each core, add the latest conversion to each conversion + let conv_notes: Vec = assets + .into_par_iter() + .with_min_len(notes_per_thread_min) + .with_max_len(notes_per_thread_max) + .map(|(idx, (addr, _epoch, conv, pos))| { + // Use transitivity to update conversion + *conv += current_convs[addr].clone(); + // Update conversion position to leaf we are about to create + *pos = idx; + // The merkle tree need only provide the conversion commitment, + // the remaining information is provided through the storage API + Node::new(conv.cmu().to_repr()) + }) + .collect(); + + // Update the MASP's transparent reward token balance to ensure that it + // is sufficiently backed to redeem rewards + let reward_key = token::balance_key(&nam(), &masp_addr); + if let Ok((Some(addr_bal), _)) = 
self.read(&reward_key) { + // If there is already a balance, then add to it + let addr_bal: token::Amount = + types::decode(addr_bal).expect("invalid balance"); + let new_bal = types::encode(&(addr_bal + total_reward)); + self.write(&reward_key, new_bal) + .expect("unable to update MASP transparent balance"); + } else { + // Otherwise the rewards form the entirity of the reward token + // balance + self.write(&reward_key, types::encode(&total_reward)) + .expect("unable to update MASP transparent balance"); + } + // Try to distribute Merkle tree construction as evenly as possible + // across multiple cores + // Merkle trees must have exactly 2^n leaves to be mergeable + let mut notes_per_thread_rounded = 1; + while notes_per_thread_max > notes_per_thread_rounded * 4 { + notes_per_thread_rounded *= 2; + } + // Make the sub-Merkle trees in parallel + let tree_parts: Vec<_> = conv_notes + .par_chunks(notes_per_thread_rounded) + .map(FrozenCommitmentTree::new) + .collect(); + + // Keep the merkle root from the old tree for transactions constructed + // close to the epoch boundary + self.conversion_state.prev_root = self.conversion_state.tree.root(); + + // Convert conversion vector into tree so that Merkle paths can be + // obtained + self.conversion_state.tree = FrozenCommitmentTree::merge(&tree_parts); + + // Add purely decoding entries to the assets map. These will be + // overwritten before the creation of the next commitment tree + for addr in masp_rewards.keys() { + // Add the decoding entry for the new asset type. An uncommited + // node position is used since this is not a conversion. 
+ let new_asset = + Self::encode_asset_type(addr.clone(), self.block.epoch); + self.conversion_state.assets.insert( + new_asset, + ( + addr.clone(), + self.block.epoch, + MaspAmount::zero().into(), + self.conversion_state.tree.size(), + ), + ); + } + + // Save the current conversion state in order to avoid computing + // conversion commitments from scratch in the next epoch + let state_key = key_prefix + .push(&(token::CONVERSION_KEY_PREFIX.to_owned())) + .map_err(Error::KeyError)?; + self.write(&state_key, types::encode(&self.conversion_state)) + .expect("unable to save current conversion state"); + Ok(()) + } + + /// Update the merkle tree with epoch data + fn update_epoch_in_merkle_tree(&mut self) -> Result<()> { + let key_prefix: Key = + Address::Internal(InternalAddress::PoS).to_db_key().into(); + + let key = key_prefix + .push(&"epoch_start_height".to_string()) + .map_err(Error::KeyError)?; + self.block + .tree + .update(&key, types::encode(&self.next_epoch_min_start_height))?; + + let key = key_prefix + .push(&"epoch_start_time".to_string()) + .map_err(Error::KeyError)?; + self.block + .tree + .update(&key, types::encode(&self.next_epoch_min_start_time))?; + + let key = key_prefix + .push(&"current_epoch".to_string()) + .map_err(Error::KeyError)?; + self.block + .tree + .update(&key, types::encode(&self.block.epoch))?; + + Ok(()) + } + + /// Start write batch. + pub fn batch() -> D::WriteBatch { + D::batch() + } + + /// Execute write batch. + pub fn exec_batch(&mut self, batch: D::WriteBatch) -> Result<()> { + self.db.exec_batch(batch) + } + + /// Batch write the value with the given height and account subspace key to + /// the DB. Returns the size difference from previous value, if any, or + /// the size of the value otherwise. 
+ pub fn batch_write_subspace_val( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result { + let value = value.as_ref(); + self.block.tree.update(key, value)?; + self.db + .batch_write_subspace_val(batch, self.block.height, key, value) + } + + /// Batch delete the value with the given height and account subspace key + /// from the DB. Returns the size of the removed value, if any, 0 if no + /// previous value was found. + pub fn batch_delete_subspace_val( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + ) -> Result { + self.block.tree.delete(key)?; + self.db + .batch_delete_subspace_val(batch, self.block.height, key) + } +} + +impl<'iter, D, H> StorageRead<'iter> for Storage +where + D: DB + for<'iter_> DBIter<'iter_>, + H: StorageHasher, +{ + type PrefixIter = >::PrefixIter; + + fn read_bytes( + &self, + key: &crate::types::storage::Key, + ) -> std::result::Result>, storage_api::Error> { + self.db.read_subspace_val(key).into_storage_result() + } + + fn has_key( + &self, + key: &crate::types::storage::Key, + ) -> std::result::Result { + self.block.tree.has_key(key).into_storage_result() + } + + fn iter_prefix( + &'iter self, + prefix: &crate::types::storage::Key, + ) -> std::result::Result { + Ok(self.db.iter_prefix(prefix)) + } + + fn rev_iter_prefix( + &'iter self, + prefix: &crate::types::storage::Key, + ) -> std::result::Result { + Ok(self.db.rev_iter_prefix(prefix)) + } + + fn iter_next( + &self, + iter: &mut Self::PrefixIter, + ) -> std::result::Result)>, storage_api::Error> + { + Ok(iter.next().map(|(key, val, _gas)| (key, val))) + } + + fn get_chain_id(&self) -> std::result::Result { + Ok(self.chain_id.to_string()) + } + + fn get_block_height( + &self, + ) -> std::result::Result { + Ok(self.block.height) + } + + fn get_block_hash( + &self, + ) -> std::result::Result { + Ok(self.block.hash.clone()) + } + + fn get_block_epoch( + &self, + ) -> std::result::Result { + Ok(self.block.epoch) + } + + fn 
get_tx_index(&self) -> std::result::Result { + Ok(self.tx_index) + } + + fn get_native_token( + &self, + ) -> std::result::Result { + Ok(self.native_token.clone()) + } +} + +impl StorageWrite for Storage +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + fn write_bytes( + &mut self, + key: &crate::types::storage::Key, + val: impl AsRef<[u8]>, + ) -> storage_api::Result<()> { + // Note that this method is the same as `Storage::write`, but without + // gas and storage bytes len diff accounting, because it can only be + // used by the protocol that has a direct mutable access to storage + let val = val.as_ref(); + self.block.tree.update(key, val).into_storage_result()?; + let _ = self + .db + .write_subspace_val(self.block.height, key, val) + .into_storage_result()?; + Ok(()) + } + + fn delete( + &mut self, + key: &crate::types::storage::Key, + ) -> storage_api::Result<()> { + // Note that this method is the same as `Storage::delete`, but without + // gas and storage bytes len diff accounting, because it can only be + // used by the protocol that has a direct mutable access to storage + self.block.tree.delete(key).into_storage_result()?; + let _ = self + .db + .delete_subspace_val(self.block.height, key) + .into_storage_result()?; + Ok(()) + } +} + +impl StorageWrite for &mut Storage +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + fn write( + &mut self, + key: &crate::types::storage::Key, + val: T, + ) -> storage_api::Result<()> { + let val = val.try_to_vec().unwrap(); + self.write_bytes(key, val) + } + + fn write_bytes( + &mut self, + key: &crate::types::storage::Key, + val: impl AsRef<[u8]>, + ) -> storage_api::Result<()> { + let _ = self + .db + .write_subspace_val(self.block.height, key, val) + .into_storage_result()?; + Ok(()) + } + + fn delete( + &mut self, + key: &crate::types::storage::Key, + ) -> storage_api::Result<()> { + let _ = self + .db + .delete_subspace_val(self.block.height, key) + .into_storage_result()?; + 
Ok(()) + } +} + +impl From for Error { + fn from(error: MerkleTreeError) -> Self { + Self::MerkleTreeError(error) + } +} + +/// Helpers for testing components that depend on storage +#[cfg(any(test, feature = "testing"))] +pub mod testing { + use super::mockdb::MockDB; + use super::*; + use crate::ledger::storage::traits::Sha256Hasher; + use crate::types::address; + /// Storage with a mock DB for testing + pub type TestStorage = Storage; + + impl Default for TestStorage { + fn default() -> Self { + let chain_id = ChainId::default(); + let tree = MerkleTree::default(); + let block = BlockStorage { + tree, + hash: BlockHash::default(), + height: BlockHeight::default(), + epoch: Epoch::default(), + pred_epochs: Epochs::default(), + results: BlockResults::default(), + }; + Self { + db: MockDB::default(), + chain_id, + block, + header: None, + last_height: BlockHeight(0), + last_epoch: Epoch::default(), + next_epoch_min_start_height: BlockHeight::default(), + next_epoch_min_start_time: DateTimeUtc::now(), + address_gen: EstablishedAddressGen::new( + "Test address generator seed", + ), + tx_index: TxIndex::default(), + conversion_state: ConversionState::default(), + #[cfg(feature = "ferveo-tpke")] + tx_queue: TxQueue::default(), + native_token: address::nam(), + } + } + } +} + +#[cfg(test)] +mod tests { + use chrono::{TimeZone, Utc}; + use proptest::prelude::*; + use rust_decimal_macros::dec; + + use super::testing::*; + use super::*; + use crate::ledger::parameters::{self, Parameters}; + use crate::types::time::{self, Duration}; + + prop_compose! { + /// Setup test input data with arbitrary epoch duration, epoch start + /// height and time, and a block height and time that are greater than + /// the epoch start height and time, and the change to be applied to + /// the epoch duration parameters. 
+ fn arb_and_epoch_duration_start_and_block() + ( + start_height in 0..1000_u64, + start_time in 0..10000_i64, + min_num_of_blocks in 1..10_u64, + min_duration in 1..100_i64, + max_expected_time_per_block in 1..100_i64, + ) + ( + min_num_of_blocks in Just(min_num_of_blocks), + min_duration in Just(min_duration), + max_expected_time_per_block in Just(max_expected_time_per_block), + start_height in Just(start_height), + start_time in Just(start_time), + block_height in start_height + 1..(start_height + 2 * min_num_of_blocks), + block_time in start_time + 1..(start_time + 2 * min_duration), + // Delta will be applied on the `min_num_of_blocks` parameter + min_blocks_delta in -(min_num_of_blocks as i64 - 1)..5, + // Delta will be applied on the `min_duration` parameter + min_duration_delta in -(min_duration - 1)..50, + // Delta will be applied on the `max_expected_time_per_block` parameter + max_time_per_block_delta in -(max_expected_time_per_block - 1)..50, + ) -> (EpochDuration, i64, BlockHeight, DateTimeUtc, BlockHeight, DateTimeUtc, + i64, i64, i64) { + let epoch_duration = EpochDuration { + min_num_of_blocks, + min_duration: Duration::seconds(min_duration).into(), + }; + (epoch_duration, max_expected_time_per_block, + BlockHeight(start_height), Utc.timestamp_opt(start_time, 0).single().expect("expected valid timestamp").into(), + BlockHeight(block_height), Utc.timestamp_opt(block_time, 0).single().expect("expected valid timestamp").into(), + min_blocks_delta, min_duration_delta, max_time_per_block_delta) + } + } + + proptest! { + /// Test that: + /// 1. When the minimum blocks have been created since the epoch + /// start height and minimum time passed since the epoch start time, + /// a new epoch must start. + /// 2. When the epoch duration parameters change, the current epoch's + /// duration doesn't change, but the next one does. 
+ #[test] + fn update_epoch_after_its_duration( + (epoch_duration, max_expected_time_per_block, start_height, start_time, block_height, block_time, + min_blocks_delta, min_duration_delta, max_time_per_block_delta) + in arb_and_epoch_duration_start_and_block()) + { + let mut storage = TestStorage { + next_epoch_min_start_height: + start_height + epoch_duration.min_num_of_blocks, + next_epoch_min_start_time: + start_time + epoch_duration.min_duration, + ..Default::default() + }; + let mut parameters = Parameters { + epoch_duration: epoch_duration.clone(), + max_expected_time_per_block: Duration::seconds(max_expected_time_per_block).into(), + vp_whitelist: vec![], + tx_whitelist: vec![], + implicit_vp: vec![], + epochs_per_year: 100, + pos_gain_p: dec!(0.1), + pos_gain_d: dec!(0.1), + staked_ratio: dec!(0.1), + pos_inflation_amount: 0, + }; + parameters.init_storage(&mut storage); + + let epoch_before = storage.last_epoch; + assert_eq!(epoch_before, storage.block.epoch); + + // Try to apply the epoch update + storage.update_epoch(block_height, block_time).unwrap(); + + // Test for 1. 
+ if block_height.0 - start_height.0 + >= epoch_duration.min_num_of_blocks + && time::duration_passed( + block_time, + start_time, + epoch_duration.min_duration, + ) + { + assert_eq!(storage.block.epoch, epoch_before.next()); + assert_eq!(storage.next_epoch_min_start_height, + block_height + epoch_duration.min_num_of_blocks); + assert_eq!(storage.next_epoch_min_start_time, + block_time + epoch_duration.min_duration); + assert_eq!( + storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), + Some(epoch_before)); + assert_eq!( + storage.block.pred_epochs.get_epoch(block_height), + Some(epoch_before.next())); + } else { + assert_eq!(storage.block.epoch, epoch_before); + assert_eq!( + storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), + Some(epoch_before)); + assert_eq!( + storage.block.pred_epochs.get_epoch(block_height), + Some(epoch_before)); + } + // Last epoch should only change when the block is committed + assert_eq!(storage.last_epoch, epoch_before); + + // Update the epoch duration parameters + parameters.epoch_duration.min_num_of_blocks = + (parameters.epoch_duration.min_num_of_blocks as i64 + min_blocks_delta) as u64; + let min_duration: i64 = parameters.epoch_duration.min_duration.0 as _; + parameters.epoch_duration.min_duration = + Duration::seconds(min_duration + min_duration_delta).into(); + parameters.max_expected_time_per_block = + Duration::seconds(max_expected_time_per_block + max_time_per_block_delta).into(); + parameters::update_max_expected_time_per_block_parameter(&mut storage, ¶meters.max_expected_time_per_block).unwrap(); + parameters::update_epoch_parameter(&mut storage, ¶meters.epoch_duration).unwrap(); + + // Test for 2. 
+ let epoch_before = storage.block.epoch; + let height_of_update = storage.next_epoch_min_start_height.0 ; + let time_of_update = storage.next_epoch_min_start_time; + let height_before_update = BlockHeight(height_of_update - 1); + let height_of_update = BlockHeight(height_of_update); + let time_before_update = time_of_update - Duration::seconds(1); + + // No update should happen before both epoch duration conditions are + // satisfied + storage.update_epoch(height_before_update, time_before_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before); + storage.update_epoch(height_of_update, time_before_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before); + storage.update_epoch(height_before_update, time_of_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before); + + // Update should happen at this or after this height and time + storage.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(storage.block.epoch, epoch_before.next()); + // The next epoch's minimum duration should change + assert_eq!(storage.next_epoch_min_start_height, + height_of_update + parameters.epoch_duration.min_num_of_blocks); + assert_eq!(storage.next_epoch_min_start_time, + time_of_update + parameters.epoch_duration.min_duration); + } + } +} diff --git a/shared/src/ledger/storage/traits.rs b/core/src/ledger/storage/traits.rs similarity index 98% rename from shared/src/ledger/storage/traits.rs rename to core/src/ledger/storage/traits.rs index b615abaa989..6e109ee53e2 100644 --- a/shared/src/ledger/storage/traits.rs +++ b/core/src/ledger/storage/traits.rs @@ -10,13 +10,15 @@ use ics23::commitment_proof::Proof as Ics23Proof; use ics23::{CommitmentProof, ExistenceProof}; use sha2::{Digest, Sha256}; +use super::ics23_specs; use super::merkle_tree::{Amt, Error, Smt}; -use super::{ics23_specs, IBC_KEY_LIMIT}; use crate::ledger::eth_bridge::storage::bridge_pool::BridgePoolTree; use crate::ledger::storage::merkle_tree::StorageBytes; use 
crate::types::eth_bridge_pool::PendingTransfer; use crate::types::hash::Hash; -use crate::types::storage::{Key, MembershipProof, StringKey, TreeBytes}; +use crate::types::storage::{ + Key, MembershipProof, StringKey, TreeBytes, IBC_KEY_LIMIT, +}; /// Trait for reading from a merkle tree that is a sub-tree /// of the global merkle tree. diff --git a/shared/src/ledger/storage/types.rs b/core/src/ledger/storage/types.rs similarity index 100% rename from shared/src/ledger/storage/types.rs rename to core/src/ledger/storage/types.rs diff --git a/shared/src/ledger/storage_api/collections/lazy_map.rs b/core/src/ledger/storage_api/collections/lazy_map.rs similarity index 100% rename from shared/src/ledger/storage_api/collections/lazy_map.rs rename to core/src/ledger/storage_api/collections/lazy_map.rs diff --git a/shared/src/ledger/storage_api/collections/lazy_vec.rs b/core/src/ledger/storage_api/collections/lazy_vec.rs similarity index 100% rename from shared/src/ledger/storage_api/collections/lazy_vec.rs rename to core/src/ledger/storage_api/collections/lazy_vec.rs diff --git a/shared/src/ledger/storage_api/collections/mod.rs b/core/src/ledger/storage_api/collections/mod.rs similarity index 100% rename from shared/src/ledger/storage_api/collections/mod.rs rename to core/src/ledger/storage_api/collections/mod.rs diff --git a/shared/src/ledger/storage_api/error.rs b/core/src/ledger/storage_api/error.rs similarity index 100% rename from shared/src/ledger/storage_api/error.rs rename to core/src/ledger/storage_api/error.rs diff --git a/core/src/ledger/storage_api/key.rs b/core/src/ledger/storage_api/key.rs new file mode 100644 index 00000000000..06b3c76bade --- /dev/null +++ b/core/src/ledger/storage_api/key.rs @@ -0,0 +1,26 @@ +//! Cryptographic signature keys storage API + +use super::*; +use crate::types::address::Address; +use crate::types::key::*; + +/// Get the public key associated with the given address. Returns `Ok(None)` if +/// not found. 
+pub fn get(storage: &S, owner: &Address) -> Result> +where + S: for<'iter> StorageRead<'iter>, +{ + let key = pk_key(owner); + storage.read(&key) +} + +/// Reveal a PK of an implicit account - the PK is written into the storage +/// of the address derived from the PK. +pub fn reveal_pk(storage: &mut S, pk: &common::PublicKey) -> Result<()> +where + S: StorageWrite, +{ + let addr: Address = pk.into(); + let key = pk_key(&addr); + storage.write(&key, pk) +} diff --git a/shared/src/ledger/storage_api/mod.rs b/core/src/ledger/storage_api/mod.rs similarity index 98% rename from shared/src/ledger/storage_api/mod.rs rename to core/src/ledger/storage_api/mod.rs index a762cabb674..7c842a61362 100644 --- a/shared/src/ledger/storage_api/mod.rs +++ b/core/src/ledger/storage_api/mod.rs @@ -3,12 +3,13 @@ pub mod collections; mod error; -pub mod queries; +pub mod key; pub mod validation; use borsh::{BorshDeserialize, BorshSerialize}; pub use error::{CustomError, Error, OptionExt, Result, ResultExt}; +use crate::types::address::Address; use crate::types::storage::{self, BlockHash, BlockHeight, Epoch, TxIndex}; /// Common storage read interface @@ -100,6 +101,9 @@ pub trait StorageRead<'iter> { /// Get the transaction index. fn get_tx_index(&self) -> Result; + + /// Get the native token address + fn get_native_token(&self) -> Result
; } /// Common storage write interface diff --git a/shared/src/ledger/storage_api/validation/mod.rs b/core/src/ledger/storage_api/validation/mod.rs similarity index 100% rename from shared/src/ledger/storage_api/validation/mod.rs rename to core/src/ledger/storage_api/validation/mod.rs diff --git a/shared/src/ledger/tx_env.rs b/core/src/ledger/tx_env.rs similarity index 100% rename from shared/src/ledger/tx_env.rs rename to core/src/ledger/tx_env.rs diff --git a/core/src/ledger/vp_env.rs b/core/src/ledger/vp_env.rs new file mode 100644 index 00000000000..49bd5d515c4 --- /dev/null +++ b/core/src/ledger/vp_env.rs @@ -0,0 +1,180 @@ +//! Validity predicate environment contains functions that can be called from +//! inside validity predicates. + +use borsh::BorshDeserialize; + +use super::storage_api::{self, StorageRead}; +use crate::types::address::Address; +use crate::types::hash::Hash; +use crate::types::key::common; +use crate::types::storage::{BlockHash, BlockHeight, Epoch, Key, TxIndex}; + +/// Validity predicate's environment is available for native VPs and WASM VPs +pub trait VpEnv<'view> { + /// Storage read prefix iterator + type PrefixIter; + + /// Type to read storage state before the transaction execution + type Pre: StorageRead<'view, PrefixIter = Self::PrefixIter>; + + /// Type to read storage state after the transaction execution + type Post: StorageRead<'view, PrefixIter = Self::PrefixIter>; + + /// Read storage state before the transaction execution + fn pre(&'view self) -> Self::Pre; + + /// Read storage state after the transaction execution + fn post(&'view self) -> Self::Post; + + /// Storage read temporary state Borsh encoded value (after tx execution). + /// It will try to read from only the write log and then decode it if + /// found. + fn read_temp( + &self, + key: &Key, + ) -> Result, storage_api::Error>; + + /// Storage read temporary state raw bytes (after tx execution). It will try + /// to read from only the write log. 
+ fn read_bytes_temp( + &self, + key: &Key, + ) -> Result>, storage_api::Error>; + + /// Getting the chain ID. + fn get_chain_id(&'view self) -> Result; + + /// Getting the block height. The height is that of the block to which the + /// current transaction is being applied. + fn get_block_height(&'view self) + -> Result; + + /// Getting the block hash. The height is that of the block to which the + /// current transaction is being applied. + fn get_block_hash(&'view self) -> Result; + + /// Getting the block epoch. The epoch is that of the block to which the + /// current transaction is being applied. + fn get_block_epoch(&'view self) -> Result; + + /// Get the shielded transaction index. + fn get_tx_index(&'view self) -> Result; + + /// Get the address of the native token. + fn get_native_token(&'view self) -> Result; + + /// Storage prefix iterator, ordered by storage keys. It will try to get an + /// iterator from the storage. + fn iter_prefix( + &'view self, + prefix: &Key, + ) -> Result; + + /// Storage prefix iterator, reverse ordered by storage keys. It will try to + /// get an iterator from the storage. + fn rev_iter_prefix( + &self, + prefix: &Key, + ) -> Result; + + /// Evaluate a validity predicate with given data. The address, changed + /// storage keys and verifiers will have the same values as the input to + /// caller's validity predicate. + /// + /// If the execution fails for whatever reason, this will return `false`. + /// Otherwise returns the result of evaluation. + fn eval( + &self, + vp_code: Vec, + input_data: Vec, + ) -> Result; + + /// Verify a transaction signature. The signature is expected to have been + /// produced on the encoded transaction [`crate::proto::Tx`] + /// using [`crate::proto::Tx::sign`]. 
+ fn verify_tx_signature( + &self, + pk: &common::PublicKey, + sig: &common::Signature, + ) -> Result; + + /// Get a tx hash + fn get_tx_code_hash(&self) -> Result; + + /// Verify a MASP transaction + fn verify_masp(&self, tx: Vec) -> Result; + + // ---- Methods below have default implementation via `pre/post` ---- + + /// Storage read prior state Borsh encoded value (before tx execution). It + /// will try to read from the storage and decode it if found. + fn read_pre( + &'view self, + key: &Key, + ) -> Result, storage_api::Error> { + self.pre().read(key) + } + + /// Storage read prior state raw bytes (before tx execution). It + /// will try to read from the storage. + fn read_bytes_pre( + &'view self, + key: &Key, + ) -> Result>, storage_api::Error> { + self.pre().read_bytes(key) + } + + /// Storage read posterior state Borsh encoded value (after tx execution). + /// It will try to read from the write log first and if no entry found + /// then from the storage and then decode it if found. + fn read_post( + &'view self, + key: &Key, + ) -> Result, storage_api::Error> { + self.post().read(key) + } + + /// Storage read posterior state raw bytes (after tx execution). It will try + /// to read from the write log first and if no entry found then from the + /// storage. + fn read_bytes_post( + &'view self, + key: &Key, + ) -> Result>, storage_api::Error> { + self.post().read_bytes(key) + } + + /// Storage `has_key` in prior state (before tx execution). It will try to + /// read from the storage. + fn has_key_pre(&'view self, key: &Key) -> Result { + self.pre().has_key(key) + } + + /// Storage `has_key` in posterior state (after tx execution). It will try + /// to check the write log first and if no entry found then the storage. + fn has_key_post( + &'view self, + key: &Key, + ) -> Result { + self.post().has_key(key) + } + + /// Storage prefix iterator for prior state (before tx execution). It will + /// try to read from the storage. 
+ fn iter_pre_next( + &'view self, + iter: &mut Self::PrefixIter, + ) -> Result)>, storage_api::Error> { + self.pre().iter_next(iter) + } + + /// Storage prefix iterator next for posterior state (after tx execution). + /// It will try to read from the write log first and if no entry found + /// then from the storage. + fn iter_post_next( + &'view self, + iter: &mut Self::PrefixIter, + ) -> Result)>, storage_api::Error> { + self.post().iter_next(iter) + } +} diff --git a/core/src/lib.rs b/core/src/lib.rs new file mode 100644 index 00000000000..c9bd40084e1 --- /dev/null +++ b/core/src/lib.rs @@ -0,0 +1,26 @@ +//! The core public types, storage_api, VpEnv and TxEnv. + +#![doc(html_favicon_url = "https://dev.namada.net/master/favicon.png")] +#![doc(html_logo_url = "https://dev.namada.net/master/rustdoc-logo.png")] +#![warn(missing_docs)] +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(rustdoc::private_intra_doc_links)] + +pub mod bytes; +pub mod ledger; +pub mod proto; +pub mod types; + +#[cfg(feature = "abciplus")] +pub use {ibc, ibc_proto, tendermint, tendermint_proto}; +#[cfg(feature = "abcipp")] +pub use { + ibc_abcipp as ibc, ibc_proto_abcipp as ibc_proto, + tendermint_abcipp as tendermint, + tendermint_proto_abcipp as tendermint_proto, +}; + +// A handy macro for tests +#[cfg(test)] +#[macro_use] +extern crate assert_matches; diff --git a/shared/src/proto/generated.rs b/core/src/proto/generated.rs similarity index 100% rename from shared/src/proto/generated.rs rename to core/src/proto/generated.rs diff --git a/shared/src/proto/generated/.gitignore b/core/src/proto/generated/.gitignore similarity index 100% rename from shared/src/proto/generated/.gitignore rename to core/src/proto/generated/.gitignore diff --git a/shared/src/proto/mod.rs b/core/src/proto/mod.rs similarity index 100% rename from shared/src/proto/mod.rs rename to core/src/proto/mod.rs diff --git a/shared/src/proto/types.rs b/core/src/proto/types.rs similarity index 83% rename from 
shared/src/proto/types.rs rename to core/src/proto/types.rs index c771a21b880..a34fddf4148 100644 --- a/shared/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -7,25 +7,11 @@ use borsh::schema::{Declaration, Definition}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use prost::Message; use serde::{Deserialize, Serialize}; -#[cfg(not(feature = "ABCI"))] -#[cfg(feature = "ferveo-tpke")] -use tendermint_proto::abci::Event; -#[cfg(not(feature = "ABCI"))] -#[cfg(feature = "ferveo-tpke")] -use tendermint_proto::abci::EventAttribute; -#[cfg(not(feature = "ABCI"))] -use tendermint_proto::abci::ResponseDeliverTx; -#[cfg(feature = "ABCI")] -#[cfg(feature = "ferveo-tpke")] -use tendermint_proto_abci::abci::Event; -#[cfg(feature = "ABCI")] -#[cfg(feature = "ferveo-tpke")] -use tendermint_proto_abci::abci::EventAttribute; -#[cfg(feature = "ABCI")] -use tendermint_proto_abci::abci::ResponseDeliverTx; use thiserror::Error; use super::generated::types; +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +use crate::tendermint_proto::abci::ResponseDeliverTx; use crate::types::key::*; use crate::types::time::DateTimeUtc; #[cfg(feature = "ferveo-tpke")] @@ -186,6 +172,100 @@ impl> Signed { } } +/// A Tx with its code replaced by a hash salted with the Borsh +/// serialized timestamp of the transaction. This structure will almost +/// certainly be smaller than a Tx, yet in the usual cases it contains +/// enough information to confirm that the Tx is as intended and make a +/// non-malleable signature. 
+#[derive( + Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize, BorshSchema, Hash, +)] +pub struct SigningTx { + pub code_hash: [u8; 32], + pub data: Option>, + pub timestamp: DateTimeUtc, +} + +impl SigningTx { + pub fn hash(&self) -> [u8; 32] { + let timestamp = Some(self.timestamp.into()); + let mut bytes = vec![]; + types::Tx { + code: self.code_hash.to_vec(), + data: self.data.clone(), + timestamp, + } + .encode(&mut bytes) + .expect("encoding a transaction failed"); + hash_tx(&bytes).0 + } + + /// Sign a transaction using [`SignedTxData`]. + pub fn sign(self, keypair: &common::SecretKey) -> Self { + let to_sign = self.hash(); + let sig = common::SigScheme::sign(keypair, to_sign); + let signed = SignedTxData { + data: self.data, + sig, + } + .try_to_vec() + .expect("Encoding transaction data shouldn't fail"); + SigningTx { + code_hash: self.code_hash, + data: Some(signed), + timestamp: self.timestamp, + } + } + + /// Verify that the transaction has been signed by the secret key + /// counterpart of the given public key. 
+ pub fn verify_sig( + &self, + pk: &common::PublicKey, + sig: &common::Signature, + ) -> std::result::Result<(), VerifySigError> { + // Try to get the transaction data from decoded `SignedTxData` + let tx_data = self.data.clone().ok_or(VerifySigError::MissingData)?; + let signed_tx_data = SignedTxData::try_from_slice(&tx_data[..]) + .expect("Decoding transaction data shouldn't fail"); + let data = signed_tx_data.data; + let tx = SigningTx { + code_hash: self.code_hash, + data, + timestamp: self.timestamp, + }; + let signed_data = tx.hash(); + common::SigScheme::verify_signature_raw(pk, &signed_data, sig) + } + + /// Expand this reduced Tx using the supplied code only if the the code + /// hashes to the stored code hash + pub fn expand(self, code: Vec) -> Option { + if hash_tx(&code).0 == self.code_hash { + Some(Tx { + code, + data: self.data, + timestamp: self.timestamp, + }) + } else { + None + } + } +} + +impl From for SigningTx { + fn from(tx: Tx) -> SigningTx { + SigningTx { + code_hash: hash_tx(&tx.code).0, + data: tx.data, + timestamp: tx.timestamp, + } + } +} + +/// A SigningTx but with the full code embedded. This structure will almost +/// certainly be bigger than SigningTxs and contains enough information to +/// execute the transaction. 
#[derive( Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize, BorshSchema, Hash, )] @@ -223,6 +303,7 @@ impl From for types::Tx { } } +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] impl From for ResponseDeliverTx { #[cfg(not(feature = "ferveo-tpke"))] fn from(_tx: Tx) -> ResponseDeliverTx { @@ -232,6 +313,8 @@ impl From for ResponseDeliverTx { /// Annotate the Tx with meta-data based on its contents #[cfg(feature = "ferveo-tpke")] fn from(tx: Tx) -> ResponseDeliverTx { + use crate::tendermint_proto::abci::{Event, EventAttribute}; + #[cfg(feature = "ABCI")] fn encode_str(x: &str) -> Vec { x.as_bytes().to_vec() @@ -320,28 +403,20 @@ impl Tx { } pub fn hash(&self) -> [u8; 32] { - hash_tx(&self.to_bytes()).0 + SigningTx::from(self.clone()).hash() } pub fn code_hash(&self) -> [u8; 32] { - hash_tx(&self.code).0 + SigningTx::from(self.clone()).code_hash } /// Sign a transaction using [`SignedTxData`]. pub fn sign(self, keypair: &common::SecretKey) -> Self { - let to_sign = self.hash(); - let sig = common::SigScheme::sign(keypair, &to_sign); - let signed = SignedTxData { - data: self.data, - sig, - } - .try_to_vec() - .expect("Encoding transaction data shouldn't fail"); - Tx { - code: self.code, - data: Some(signed), - timestamp: self.timestamp, - } + let code = self.code.clone(); + SigningTx::from(self) + .sign(keypair) + .expand(code) + .expect("code hashes to unexpected value") } /// Verify that the transaction has been signed by the secret key @@ -351,18 +426,7 @@ impl Tx { pk: &common::PublicKey, sig: &common::Signature, ) -> std::result::Result<(), VerifySigError> { - // Try to get the transaction data from decoded `SignedTxData` - let tx_data = self.data.clone().ok_or(VerifySigError::MissingData)?; - let signed_tx_data = SignedTxData::try_from_slice(&tx_data[..]) - .expect("Decoding transaction data shouldn't fail"); - let data = signed_tx_data.data; - let tx = Tx { - code: self.code.clone(), - data, - timestamp: self.timestamp, - }; - 
let signed_data = tx.hash(); - common::SigScheme::verify_signature_raw(pk, &signed_data, sig) + SigningTx::from(self.clone()).verify_sig(pk, sig) } } diff --git a/shared/src/types/address.rs b/core/src/types/address.rs similarity index 100% rename from shared/src/types/address.rs rename to core/src/types/address.rs diff --git a/shared/src/types/chain.rs b/core/src/types/chain.rs similarity index 98% rename from shared/src/types/chain.rs rename to core/src/types/chain.rs index 18627903cb5..06a5d3938c0 100644 --- a/shared/src/types/chain.rs +++ b/core/src/types/chain.rs @@ -18,11 +18,11 @@ pub const CHAIN_ID_PREFIX_SEP: char = '.'; /// Development default chain ID. Must be [`CHAIN_ID_LENGTH`] long. #[cfg(feature = "dev")] -pub const DEFAULT_CHAIN_ID: &str = "anoma-devchain.000000000000000"; +pub const DEFAULT_CHAIN_ID: &str = "namada-devchain.00000000000000"; /// Release default chain ID. Must be [`CHAIN_ID_LENGTH`] long. #[cfg(not(feature = "dev"))] -pub const DEFAULT_CHAIN_ID: &str = "anoma-internal.000000000000000"; +pub const DEFAULT_CHAIN_ID: &str = "namada-internal.00000000000000"; /// Chain ID #[derive( diff --git a/shared/src/types/eth_abi.rs b/core/src/types/eth_abi.rs similarity index 90% rename from shared/src/types/eth_abi.rs rename to core/src/types/eth_abi.rs index 6b1f25ea50d..75f0dd1f257 100644 --- a/shared/src/types/eth_abi.rs +++ b/core/src/types/eth_abi.rs @@ -103,6 +103,7 @@ impl Encode for AbiEncode { mod tests { use std::convert::TryInto; + use data_encoding::HEXLOWER; use ethabi::ethereum_types::U256; use super::*; @@ -112,7 +113,9 @@ mod tests { #[test] fn test_abi_encode() { let expected = "0x000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000047465737400000000000000000000000000000000000000000000000000000000"; - let expected = hex::decode(&expected[2..]).expect("Test failed"); + let expected = HEXLOWER + 
.decode(&expected.as_bytes()[2..]) + .expect("Test failed"); let got = AbiEncode::encode(&[ Token::Uint(U256::from(42u64)), Token::String("test".into()), @@ -127,13 +130,15 @@ mod tests { "1c8aff950685c2ed4bc3174f3472287b56d9517b9c948127319a09a7a36deac8"; assert_eq!( expected, - &hex::encode({ - let mut st = Keccak::v256(); - let mut output = [0; 32]; - st.update(b"hello"); - st.finalize(&mut output); - output - }) + &HEXLOWER.encode( + &{ + let mut st = Keccak::v256(); + let mut output = [0; 32]; + st.update(b"hello"); + st.finalize(&mut output); + output + }[..] + ) ); } diff --git a/shared/src/types/eth_bridge_pool.rs b/core/src/types/eth_bridge_pool.rs similarity index 100% rename from shared/src/types/eth_bridge_pool.rs rename to core/src/types/eth_bridge_pool.rs diff --git a/shared/src/types/ethereum_events.rs b/core/src/types/ethereum_events.rs similarity index 93% rename from shared/src/types/ethereum_events.rs rename to core/src/types/ethereum_events.rs index 32aa10a7d4a..f3b6af9d5a7 100644 --- a/shared/src/types/ethereum_events.rs +++ b/core/src/types/ethereum_events.rs @@ -1,4 +1,4 @@ -//! Types representing data intended for Anoma via Ethereum events +//! 
Types representing data intended for Namada via Ethereum events use std::fmt::Display; use std::str::FromStr; @@ -14,7 +14,7 @@ use crate::types::keccak::KeccakHash; use crate::types::storage::{DbKeySeg, KeySeg}; use crate::types::token::Amount; -/// Anoma native type to replace the ethabi::Uint type +/// Namada native type to replace the ethabi::Uint type #[derive( Clone, Debug, @@ -127,7 +127,7 @@ impl KeySeg for EthAddress { } } -/// An Ethereum event to be processed by the Anoma ledger +/// An Ethereum event to be processed by the Namada ledger #[derive( PartialEq, Eq, @@ -142,7 +142,7 @@ impl KeySeg for EthAddress { )] pub enum EthereumEvent { /// Event transferring batches of ether or Ethereum based ERC20 tokens - /// from Ethereum to wrapped assets on Anoma + /// from Ethereum to wrapped assets on Namada TransfersToNamada { /// Monotonically increasing nonce #[allow(dead_code)] @@ -152,7 +152,7 @@ pub enum EthereumEvent { transfers: Vec, }, /// A confirmation event that a batch of transfers have been made - /// from Anoma to Ethereum + /// from Namada to Ethereum TransfersToEthereum { /// Monotonically increasing nonce #[allow(dead_code)] @@ -209,11 +209,11 @@ impl EthereumEvent { /// SHA256 of the Borsh serialization of the [`EthereumEvent`]. 
pub fn hash(&self) -> Result { let bytes = self.try_to_vec()?; - Ok(Hash::sha256(&bytes)) + Ok(Hash::sha256(bytes)) } } -/// An event transferring some kind of value from Ethereum to Anoma +/// An event transferring some kind of value from Ethereum to Namada #[derive( Clone, Debug, @@ -231,11 +231,11 @@ pub struct TransferToNamada { pub amount: Amount, /// Address of the smart contract issuing the token pub asset: EthAddress, - /// The address receiving wrapped assets on Anoma + /// The address receiving wrapped assets on Namada pub receiver: Address, } -/// An event transferring some kind of value from Anoma to Ethereum +/// An event transferring some kind of value from Namada to Ethereum #[derive( Clone, Debug, @@ -340,10 +340,8 @@ pub mod tests { /// Test helpers #[cfg(any(test, feature = "testing"))] pub mod testing { - use namada_proof_of_stake::types::VotingPower; - use super::*; - use crate::types::token::Amount; + use crate::types::token::{self, Amount}; pub const DAI_ERC20_ETH_ADDRESS_CHECKSUMMED: &str = "0x6B175474E89094C44Da98b954EedeAC495271d0F"; @@ -374,8 +372,8 @@ pub mod testing { Amount::from(1_000) } - pub fn arbitrary_voting_power() -> VotingPower { - VotingPower::from(1_000) + pub fn arbitrary_bonded_stake() -> token::Amount { + token::Amount::from(1_000) } /// A [`EthereumEvent::TransfersToNamada`] containing a single transfer of diff --git a/shared/src/types/governance.rs b/core/src/types/governance.rs similarity index 85% rename from shared/src/types/governance.rs rename to core/src/types/governance.rs index 5f82335cb2b..438017a3709 100644 --- a/shared/src/types/governance.rs +++ b/core/src/types/governance.rs @@ -9,13 +9,12 @@ use rust_decimal::Decimal; use serde::{Deserialize, Serialize}; use thiserror::Error; -use super::address::Address; -use super::hash::Hash; -use super::key::common::{self, Signature}; -use super::key::SigScheme; -use super::storage::Epoch; -use super::token::SCALE; -use super::transaction::governance::InitProposalData; 
+use crate::types::address::Address; +use crate::types::hash::Hash; +use crate::types::key::common::{self, Signature}; +use crate::types::key::SigScheme; +use crate::types::storage::Epoch; +use crate::types::token::SCALE; /// Type alias for vote power pub type VotePower = u128; @@ -84,8 +83,8 @@ pub enum TallyResult { Passed, /// Proposal was rejected Rejected, - /// Proposal result is unknown - Unknown, + /// A critical error in tally computation + Failed, } /// The result with votes of a proposal @@ -124,7 +123,7 @@ impl Display for TallyResult { match self { TallyResult::Passed => write!(f, "passed"), TallyResult::Rejected => write!(f, "rejected"), - TallyResult::Unknown => write!(f, "unknown"), + TallyResult::Failed => write!(f, "failed"), } } } @@ -163,31 +162,6 @@ pub enum ProposalError { InvalidProposalData, } -impl TryFrom for InitProposalData { - type Error = ProposalError; - - fn try_from(proposal: Proposal) -> Result { - let proposal_code = if let Some(path) = proposal.proposal_code_path { - match std::fs::read(path) { - Ok(bytes) => Some(bytes), - Err(_) => return Err(Self::Error::InvalidProposalData), - } - } else { - None - }; - - Ok(InitProposalData { - id: proposal.id, - content: proposal.content.try_to_vec().unwrap(), - author: proposal.author, - voting_start_epoch: proposal.voting_start_epoch, - voting_end_epoch: proposal.voting_end_epoch, - grace_epoch: proposal.grace_epoch, - proposal_code, - }) - } -} - #[derive( Debug, Clone, BorshSerialize, BorshDeserialize, Serialize, Deserialize, )] @@ -224,9 +198,9 @@ impl OfflineProposal { tally_epoch_serialized, ] .concat(); - let proposal_data_hash = Hash::sha256(&proposal_serialized); + let proposal_data_hash = Hash::sha256(proposal_serialized); let signature = - common::SigScheme::sign(signing_key, &proposal_data_hash); + common::SigScheme::sign(signing_key, proposal_data_hash); Self { content: proposal.content, author: proposal.author, @@ -261,7 +235,7 @@ impl OfflineProposal { 
tally_epoch_serialized, ] .concat(); - Hash::sha256(&proposal_serialized) + Hash::sha256(proposal_serialized) } } @@ -297,7 +271,7 @@ impl OfflineVote { .expect("Conversion to bytes shouldn't fail."); let vote_serialized = &[proposal_hash_data, proposal_vote_data].concat(); - let signature = common::SigScheme::sign(signing_key, &vote_serialized); + let signature = common::SigScheme::sign(signing_key, vote_serialized); Self { proposal_hash, vote, diff --git a/shared/src/types/hash.rs b/core/src/types/hash.rs similarity index 80% rename from shared/src/types/hash.rs rename to core/src/types/hash.rs index 99d058967d8..01c38de5eeb 100644 --- a/shared/src/types/hash.rs +++ b/core/src/types/hash.rs @@ -4,17 +4,13 @@ use std::fmt::{self, Display}; use std::ops::Deref; use std::str::FromStr; -use arse_merkle_tree::traits::Value; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use hex::FromHex; +use data_encoding::HEXUPPER; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; -use crate::tendermint::abci::transaction; -use crate::tendermint::Hash as TmHash; - -/// The length of the raw transaction hash. +/// The length of the transaction hash string pub const HASH_LENGTH: usize = 32; /// The length of the hex encoded transaction hash. 
@@ -28,7 +24,7 @@ pub enum Error { #[error("Failed trying to convert slice to a hash: {0}")] ConversionFailed(std::array::TryFromSliceError), #[error("Failed to convert string into a hash: {0}")] - FromStringError(hex::FromHexError), + FromStringError(data_encoding::DecodeError), } /// Result for functions that may fail @@ -48,14 +44,11 @@ pub type HashResult = std::result::Result; Deserialize, )] /// A hash, typically a sha-2 hash of a tx -pub struct Hash(pub [u8; 32]); +pub struct Hash(pub [u8; HASH_LENGTH]); impl Display for Hash { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for byte in &self.0 { - write!(f, "{:02X}", byte)?; - } - Ok(()) + write!(f, "{}", HEXUPPER.encode(&self.0)) } } @@ -66,7 +59,7 @@ impl AsRef<[u8]> for Hash { } impl Deref for Hash { - type Target = [u8; 32]; + type Target = [u8; HASH_LENGTH]; fn deref(&self) -> &Self::Target { &self.0 @@ -86,7 +79,7 @@ impl TryFrom<&[u8]> for Hash { ), }); } - let hash: [u8; 32] = + let hash: [u8; HASH_LENGTH] = TryFrom::try_from(value).map_err(Error::ConversionFailed)?; Ok(Hash(hash)) } @@ -104,16 +97,10 @@ impl TryFrom<&str> for Hash { type Error = self::Error; fn try_from(string: &str) -> HashResult { - Ok(Self( - <[u8; HASH_LENGTH]>::from_hex(string) - .map_err(Error::FromStringError)?, - )) - } -} - -impl From for transaction::Hash { - fn from(hash: Hash) -> Self { - Self::new(hash.0) + let vec = HEXUPPER + .decode(string.to_uppercase().as_ref()) + .map_err(Error::FromStringError)?; + Self::try_from(&vec[..]) } } @@ -132,15 +119,27 @@ impl Hash { Self(*digest.as_ref()) } + fn zero() -> Self { + Self([0u8; HASH_LENGTH]) + } + /// Check if the hash is all zeros pub fn is_zero(&self) -> bool { self == &Self::zero() } } -impl From for TmHash { +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl From for crate::tendermint::abci::transaction::Hash { + fn from(hash: Hash) -> Self { + Self::new(hash.0) + } +} + +#[cfg(any(feature = "tendermint", feature = 
"tendermint-abcipp"))] +impl From for crate::tendermint::Hash { fn from(hash: Hash) -> Self { - TmHash::Sha256(hash.0) + Self::Sha256(hash.0) } } diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs new file mode 100644 index 00000000000..3d537cb0259 --- /dev/null +++ b/core/src/types/ibc.rs @@ -0,0 +1,74 @@ +//! IBC event without IBC-related data types + +use std::collections::HashMap; + +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; + +/// Wrapped IbcEvent +#[derive( + Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema, PartialEq, Eq, +)] +pub struct IbcEvent { + /// The IBC event type + pub event_type: String, + /// The attributes of the IBC event + pub attributes: HashMap, +} + +impl std::fmt::Display for IbcEvent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let attributes = self + .attributes + .iter() + .map(|(k, v)| format!("{}: {};", k, v)) + .collect::>() + .join(", "); + write!( + f, + "Event type: {}, Attributes: {}", + self.event_type, attributes + ) + } +} + +#[cfg(any(feature = "abciplus", feature = "abcipp"))] +mod ibc_rs_conversion { + use std::collections::HashMap; + + use thiserror::Error; + + use super::IbcEvent; + use crate::ibc::events::{Error as IbcEventError, IbcEvent as RawIbcEvent}; + use crate::tendermint::abci::Event as AbciEvent; + + #[allow(missing_docs)] + #[derive(Error, Debug)] + pub enum Error { + #[error("IBC event error: {0}")] + IbcEvent(IbcEventError), + } + + /// Conversion functions result + pub type Result = std::result::Result; + + impl TryFrom for IbcEvent { + type Error = Error; + + fn try_from(e: RawIbcEvent) -> Result { + let event_type = e.event_type().as_str().to_string(); + let abci_event = AbciEvent::try_from(e).map_err(Error::IbcEvent)?; + let attributes: HashMap<_, _> = abci_event + .attributes + .iter() + .map(|tag| (tag.key.to_string(), tag.value.to_string())) + .collect(); + Ok(Self { + event_type, + attributes, + }) + } + } +} + +#[cfg(any(feature = 
"abciplus", feature = "abcipp"))] +pub use ibc_rs_conversion::*; diff --git a/core/src/types/internal.rs b/core/src/types/internal.rs new file mode 100644 index 00000000000..848c09bec16 --- /dev/null +++ b/core/src/types/internal.rs @@ -0,0 +1,82 @@ +//! Shared internal types between the host env and guest (wasm). + +use borsh::{BorshDeserialize, BorshSerialize}; + +/// A result of a wasm call to host functions that may fail. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum HostEnvResult { + /// A success + Success = 1, + /// A non-fatal failure does **not** interrupt WASM execution + Fail = -1, +} + +/// Key-value pair represents data from account's subspace. +/// It is used for prefix iterator's WASM host_env functions. +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] +pub struct KeyVal { + /// The storage key + pub key: String, + /// The value as arbitrary bytes + pub val: Vec, +} + +impl HostEnvResult { + /// Convert result to `i64`, which can be passed to wasm + pub fn to_i64(self) -> i64 { + self as _ + } + + /// Check if the given result as `i64` is a success + pub fn is_success(int: i64) -> bool { + int == Self::Success.to_i64() + } + + /// Check if the given result as `i64` is a non-fatal failure + pub fn is_fail(int: i64) -> bool { + int == Self::Fail.to_i64() + } +} + +impl From for HostEnvResult { + fn from(success: bool) -> Self { + if success { Self::Success } else { Self::Fail } + } +} + +#[cfg(feature = "ferveo-tpke")] +mod tx_queue { + use borsh::{BorshDeserialize, BorshSerialize}; + + use crate::types::transaction::WrapperTx; + + #[derive(Default, Debug, Clone, BorshDeserialize, BorshSerialize)] + /// Wrapper txs to be decrypted in the next block proposal + pub struct TxQueue(std::collections::VecDeque); + + impl TxQueue { + /// Add a new wrapper at the back of the queue + pub fn push(&mut self, wrapper: WrapperTx) { + self.0.push_back(wrapper); + } + + /// Remove the wrapper at the head of the queue + pub fn pop(&mut self) -> 
Option { + self.0.pop_front() + } + + /// Get an iterator over the queue + pub fn iter(&self) -> impl std::iter::Iterator { + self.0.iter() + } + + /// Check if there are any txs in the queue + #[allow(dead_code)] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + } +} + +#[cfg(feature = "ferveo-tpke")] +pub use tx_queue::TxQueue; diff --git a/shared/src/types/keccak.rs b/core/src/types/keccak.rs similarity index 92% rename from shared/src/types/keccak.rs rename to core/src/types/keccak.rs index b1e04c86914..a2ee9c0d871 100644 --- a/shared/src/types/keccak.rs +++ b/core/src/types/keccak.rs @@ -5,7 +5,7 @@ use std::convert::{TryFrom, TryInto}; use std::fmt::Display; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use hex::FromHex; +use data_encoding::HEXUPPER; use thiserror::Error; use tiny_keccak::{Hasher, Keccak}; @@ -20,7 +20,7 @@ pub enum TryFromError { #[error("Failed trying to convert slice to a hash: {0}")] ConversionFailed(std::array::TryFromSliceError), #[error("Failed to convert string into a hash: {0}")] - FromStringError(hex::FromHexError), + FromStringError(data_encoding::DecodeError), } /// Represents a Keccak hash. 
@@ -84,8 +84,9 @@ impl TryFrom<&str> for KeccakHash { type Error = TryFromError; fn try_from(string: &str) -> Result { - let bytes: Vec = - Vec::from_hex(string).map_err(TryFromError::FromStringError)?; + let bytes: Vec = HEXUPPER + .decode(string.as_bytes()) + .map_err(TryFromError::FromStringError)?; Self::try_from(bytes.as_slice()) } } diff --git a/shared/src/types/key/common.rs b/core/src/types/key/common.rs similarity index 96% rename from shared/src/types/key/common.rs rename to core/src/types/key/common.rs index 8144acf4660..633367053cc 100644 --- a/shared/src/types/key/common.rs +++ b/core/src/types/key/common.rs @@ -6,16 +6,15 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use data_encoding::HEXLOWER; -use namada_proof_of_stake::types::PublicKeyTmRawHash; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; use serde::{Deserialize, Serialize}; use thiserror::Error; use super::{ - ed25519, secp256k1, tm_consensus_key_raw_hash, ParsePublicKeyError, - ParseSecretKeyError, ParseSignatureError, RefTo, SchemeType, - SigScheme as SigSchemeTrait, VerifySigError, + ed25519, secp256k1, ParsePublicKeyError, ParseSecretKeyError, + ParseSignatureError, RefTo, SchemeType, SigScheme as SigSchemeTrait, + VerifySigError, }; use crate::types::ethereum_events::EthAddress; @@ -345,9 +344,3 @@ impl super::SigScheme for SigScheme { } } } - -impl PublicKeyTmRawHash for PublicKey { - fn tm_raw_hash(&self) -> String { - tm_consensus_key_raw_hash(self) - } -} diff --git a/shared/src/types/key/dkg_session_keys.rs b/core/src/types/key/dkg_session_keys.rs similarity index 100% rename from shared/src/types/key/dkg_session_keys.rs rename to core/src/types/key/dkg_session_keys.rs diff --git a/shared/src/types/key/ed25519.rs b/core/src/types/key/ed25519.rs similarity index 100% rename from shared/src/types/key/ed25519.rs rename to core/src/types/key/ed25519.rs diff --git a/core/src/types/key/mod.rs b/core/src/types/key/mod.rs new file mode 100644 
index 00000000000..9b4efd6f212 --- /dev/null +++ b/core/src/types/key/mod.rs @@ -0,0 +1,563 @@ +//! Cryptographic keys + +pub mod common; +/// Elliptic curve keys for the DKG +pub mod dkg_session_keys; +pub mod ed25519; +pub mod secp256k1; + +use std::fmt::{Debug, Display}; +use std::hash::Hash; +use std::str::FromStr; + +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use data_encoding::HEXUPPER; +#[cfg(feature = "rand")] +use rand::{CryptoRng, RngCore}; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; +use thiserror::Error; + +use super::address::Address; +use super::storage::{self, DbKeySeg, Key, KeySeg}; +use crate::types::address; + +const PK_STORAGE_KEY: &str = "public_key"; +const PROTOCOL_PK_STORAGE_KEY: &str = "protocol_public_key"; + +/// Obtain a storage key for user's public key. +pub fn pk_key(owner: &Address) -> storage::Key { + Key::from(owner.to_db_key()) + .push(&PK_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Check if the given storage key is a public key. If it is, returns the owner. +pub fn is_pk_key(key: &Key) -> Option<&Address> { + match &key.segments[..] { + [DbKeySeg::AddressSeg(owner), DbKeySeg::StringSeg(key)] + if key == PK_STORAGE_KEY => + { + Some(owner) + } + _ => None, + } +} + +/// Obtain a storage key for user's protocol public key. +pub fn protocol_pk_key(owner: &Address) -> storage::Key { + Key::from(owner.to_db_key()) + .push(&PROTOCOL_PK_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Check if the given storage key is a public key. If it is, returns the owner. +pub fn is_protocol_pk_key(key: &Key) -> Option<&Address> { + match &key.segments[..] 
{ + [DbKeySeg::AddressSeg(owner), DbKeySeg::StringSeg(key)] + if key == PROTOCOL_PK_STORAGE_KEY => + { + Some(owner) + } + _ => None, + } +} + +/// Represents an error in signature verification +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum VerifySigError { + #[error("Signature verification failed: {0}")] + SigVerifyError(String), + #[error("Signature verification failed to encode the data: {0}")] + DataEncodingError(std::io::Error), + #[error("Transaction doesn't have any data with a signature.")] + MissingData, + #[error("Signature belongs to a different scheme from the public key.")] + MismatchedScheme, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ParsePublicKeyError { + #[error("Invalid public key hex: {0}")] + InvalidHex(data_encoding::DecodeError), + #[error("Invalid public key encoding: {0}")] + InvalidEncoding(std::io::Error), + #[error("Parsed public key does not belong to desired scheme")] + MismatchedScheme, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ParseSignatureError { + #[error("Invalid signature hex: {0}")] + InvalidHex(data_encoding::DecodeError), + #[error("Invalid signature encoding: {0}")] + InvalidEncoding(std::io::Error), + #[error("Parsed signature does not belong to desired scheme")] + MismatchedScheme, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ParseSecretKeyError { + #[error("Invalid secret key hex: {0}")] + InvalidHex(data_encoding::DecodeError), + #[error("Invalid secret key encoding: {0}")] + InvalidEncoding(std::io::Error), + #[error("Parsed secret key does not belong to desired scheme")] + MismatchedScheme, +} + +/// A value-to-value conversion that consumes the input value. + +pub trait RefTo { + /// Performs the conversion. + fn ref_to(&self) -> T; +} + +/// Simple and safe type conversions that may fail in a controlled +/// way under some circumstances. + +pub trait TryFromRef: Sized { + /// The type returned in the event of a conversion error. 
+ type Error; + /// Performs the conversion. + fn try_from_ref(value: &T) -> Result; +} + +/// Type capturing signature scheme IDs +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum SchemeType { + /// Type identifier for Ed25519 scheme + Ed25519, + /// Type identifier for Secp256k1 scheme + Secp256k1, + /// Type identifier for Common + Common, +} + +impl FromStr for SchemeType { + type Err = (); + + fn from_str(input: &str) -> Result { + match input.to_lowercase().as_str() { + "ed25519" => Ok(Self::Ed25519), + "secp256k1" => Ok(Self::Secp256k1), + "common" => Ok(Self::Common), + _ => Err(()), + } + } +} + +/// Represents a signature + +pub trait Signature: + Hash + PartialOrd + Serialize + BorshSerialize + BorshDeserialize + BorshSchema +{ + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Convert from one Signature type to another + fn try_from_sig( + sig: &SIG, + ) -> Result { + if SIG::TYPE == Self::TYPE { + let sig_arr = sig.try_to_vec().unwrap(); + let res = Self::try_from_slice(sig_arr.as_ref()); + res.map_err(ParseSignatureError::InvalidEncoding) + } else { + Err(ParseSignatureError::MismatchedScheme) + } + } + /// Convert from self to another SecretKey type + fn try_to_sig(&self) -> Result { + SIG::try_from_sig(self) + } +} + +/// Represents a public key + +pub trait PublicKey: + BorshSerialize + + BorshDeserialize + + BorshSchema + + Ord + + Clone + + Display + + Debug + + PartialOrd + + FromStr + + Hash + + Send + + Sync +{ + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Convert from one PublicKey type to another + fn try_from_pk( + pk: &PK, + ) -> Result { + if Self::TYPE == PK::TYPE { + let pk_arr = pk.try_to_vec().unwrap(); + let res = Self::try_from_slice(pk_arr.as_ref()); + res.map_err(ParsePublicKeyError::InvalidEncoding) + } else { + Err(ParsePublicKeyError::MismatchedScheme) + } + } + /// Convert from self to another PublicKey type + fn try_to_pk(&self) -> Result { + PK::try_from_pk(self) 
+ } +} + +/// Represents a secret key + +pub trait SecretKey: + BorshSerialize + + BorshDeserialize + + BorshSchema + + Display + + Debug + + RefTo + + FromStr + + Clone + + Sync + + Send +{ + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Represents the public part of this keypair + type PublicKey: PublicKey; + /// Convert from one SecretKey type to self + fn try_from_sk( + sk: &SK, + ) -> Result { + if SK::TYPE == Self::TYPE { + let sk_vec = sk.try_to_vec().unwrap(); + let res = Self::try_from_slice(sk_vec.as_ref()); + res.map_err(ParseSecretKeyError::InvalidEncoding) + } else { + Err(ParseSecretKeyError::MismatchedScheme) + } + } + /// Convert from self to another SecretKey type + fn try_to_sk(&self) -> Result { + SK::try_from_sk(self) + } +} + +/// Represents a digital signature scheme. More precisely this trait captures +/// the concepts of public keys, private keys, and signatures as well as +/// the algorithms over these concepts to generate keys, sign messages, and +/// verify signatures. + +pub trait SigScheme: Eq + Ord + Debug + Serialize + Default { + /// Represents the signature for this scheme + type Signature: 'static + Signature; + /// Represents the public key for this scheme + type PublicKey: 'static + PublicKey; + /// Represents the secret key for this scheme + type SecretKey: 'static + SecretKey; + /// The scheme type of this implementation + const TYPE: SchemeType; + /// Generate a keypair. + #[cfg(feature = "rand")] + fn generate(csprng: &mut R) -> Self::SecretKey + where + R: CryptoRng + RngCore; + /// Sign the data with a key. + fn sign( + keypair: &Self::SecretKey, + data: impl AsRef<[u8]>, + ) -> Self::Signature; + /// Check that the public key matches the signature on the given data. + fn verify_signature( + pk: &Self::PublicKey, + data: &T, + sig: &Self::Signature, + ) -> Result<(), VerifySigError>; + /// Check that the public key matches the signature on the given raw data. 
+ fn verify_signature_raw( + pk: &Self::PublicKey, + data: &[u8], + sig: &Self::Signature, + ) -> Result<(), VerifySigError>; +} + +/// Public key hash derived from `common::Key` borsh encoded bytes (hex string +/// of the first 40 chars of sha256 hash) +#[derive( + Debug, + Clone, + BorshSerialize, + BorshDeserialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, +)] +#[serde(transparent)] +pub struct PublicKeyHash(pub(crate) String); + +const PKH_HASH_LEN: usize = address::HASH_LEN; + +impl From for String { + fn from(pkh: PublicKeyHash) -> Self { + pkh.0 + } +} + +impl Display for PublicKeyHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl FromStr for PublicKeyHash { + type Err = PkhFromStringError; + + fn from_str(s: &str) -> Result { + if s.len() != PKH_HASH_LEN { + return Err(Self::Err::UnexpectedLen(s.len())); + } + Ok(Self(s.to_owned())) + } +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum PkhFromStringError { + #[error("Wrong PKH len. 
Expected {PKH_HASH_LEN}, got {0}")] + UnexpectedLen(usize), +} + +impl From<&PK> for PublicKeyHash { + fn from(pk: &PK) -> Self { + let pk_bytes = + pk.try_to_vec().expect("Public key encoding shouldn't fail"); + let mut hasher = Sha256::new(); + hasher.update(pk_bytes); + // hex of the first 40 chars of the hash + PublicKeyHash(format!( + "{:.width$X}", + hasher.finalize(), + width = PKH_HASH_LEN + )) + } +} + +/// Derive Tendermint raw hash from the public key +pub trait PublicKeyTmRawHash { + /// Derive Tendermint raw hash from the public key + fn tm_raw_hash(&self) -> String; +} + +impl PublicKeyTmRawHash for common::PublicKey { + fn tm_raw_hash(&self) -> String { + tm_consensus_key_raw_hash(self) + } +} + +/// Convert validator's consensus key into address raw hash that is compatible +/// with Tendermint +pub fn tm_consensus_key_raw_hash(pk: &common::PublicKey) -> String { + match pk { + common::PublicKey::Ed25519(pk) => { + let pkh = PublicKeyHash::from(pk); + pkh.0 + } + common::PublicKey::Secp256k1(pk) => { + let pkh = PublicKeyHash::from(pk); + pkh.0 + } + } +} + +/// Convert Tendermint validator's raw hash bytes to Namada raw hash string +pub fn tm_raw_hash_to_string(raw_hash: impl AsRef<[u8]>) -> String { + HEXUPPER.encode(raw_hash.as_ref()) +} + +/// Helpers for testing with keys. 
+#[cfg(any(test, feature = "testing"))] +pub mod testing { + use borsh::BorshDeserialize; + use proptest::prelude::*; + use rand::prelude::{StdRng, ThreadRng}; + use rand::{thread_rng, SeedableRng}; + + use super::SigScheme; + use crate::types::key::*; + + /// A keypair for tests + pub fn keypair_1() -> ::SecretKey { + // generated from `cargo test gen_keypair -- --nocapture` + let bytes = [ + 33, 82, 91, 186, 100, 168, 220, 158, 185, 140, 63, 172, 3, 88, 52, + 113, 94, 30, 213, 84, 175, 184, 235, 169, 70, 175, 36, 252, 45, + 190, 138, 79, + ]; + ed25519::SecretKey::try_from_slice(bytes.as_ref()) + .unwrap() + .try_to_sk() + .unwrap() + } + + /// A keypair for tests + pub fn keypair_2() -> ::SecretKey { + // generated from `cargo test gen_keypair -- --nocapture` + let bytes = [ + 27, 238, 157, 32, 131, 242, 184, 142, 146, 189, 24, 249, 68, 165, + 205, 71, 213, 158, 25, 253, 52, 217, 87, 52, 171, 225, 110, 131, + 238, 58, 94, 56, + ]; + ed25519::SecretKey::try_from_slice(bytes.as_ref()) + .unwrap() + .try_to_sk() + .unwrap() + } + + /// An Ethereum keypair for tests + pub fn keypair_3() -> ::SecretKey { + let bytes = [ + 0xf3, 0x78, 0x78, 0x80, 0xba, 0x85, 0x0b, 0xa4, 0xc5, 0x74, 0x50, + 0x5a, 0x23, 0x54, 0x6d, 0x46, 0x74, 0xa1, 0x3f, 0x09, 0x75, 0x0c, + 0xf4, 0xb5, 0xb8, 0x17, 0x69, 0x64, 0xf4, 0x08, 0xd4, 0x80, + ]; + secp256k1::SecretKey::try_from_slice(bytes.as_ref()) + .unwrap() + .try_to_sk() + .unwrap() + } + + /// An Ethereum keypair for tests + pub fn keypair_4() -> ::SecretKey { + let bytes = [ + 0x68, 0xab, 0xce, 0x64, 0x54, 0x07, 0x7e, 0xf5, 0x1a, 0xb4, 0x31, + 0x7a, 0xb8, 0x8b, 0x98, 0x30, 0x27, 0x11, 0x4e, 0x58, 0x69, 0xd6, + 0x45, 0x94, 0xdc, 0x90, 0x8d, 0x94, 0xee, 0x58, 0x46, 0x91, + ]; + secp256k1::SecretKey::try_from_slice(bytes.as_ref()) + .unwrap() + .try_to_sk() + .unwrap() + } + + /// Generate an arbitrary [`super::SecretKey`]. 
+ pub fn arb_keypair() -> impl Strategy { + any::<[u8; 32]>().prop_map(move |seed| { + let mut rng = StdRng::from_seed(seed); + S::generate(&mut rng) + }) + } + + /// Generate an arbitrary [`common::SecretKey`]. + pub fn arb_common_keypair() -> impl Strategy { + arb_keypair::() + .prop_map(|keypair| keypair.try_to_sk().unwrap()) + } + + /// Generate a new random [`super::SecretKey`]. + pub fn gen_keypair() -> S::SecretKey { + let mut rng: ThreadRng = thread_rng(); + S::generate(&mut rng) + } +} + +#[cfg(test)] +macro_rules! sigscheme_test { + ($name:ident, $type:ty) => { + pub mod $name { + use super::*; + + /// Run `cargo test gen_keypair -- --nocapture` to generate a + /// keypair. + #[test] + fn gen_keypair0() { + use rand::prelude::ThreadRng; + use rand::thread_rng; + + let mut rng: ThreadRng = thread_rng(); + let keypair = <$type>::generate(&mut rng); + println!( + "keypair {:?}", + keypair.try_to_vec().unwrap().as_slice() + ); + } + /// Run `cargo test gen_keypair -- --nocapture` to generate a + /// new keypair. + #[test] + fn gen_keypair1() { + let secret_key = testing::gen_keypair::<$type>(); + let public_key = secret_key.ref_to(); + println!("Public key: {}", public_key); + println!("Secret key: {}", secret_key); + } + + /// Sign a simple message and verify the signature. + #[test] + fn gen_sign_verify() { + use rand::prelude::ThreadRng; + use rand::thread_rng; + + let mut rng: ThreadRng = thread_rng(); + let sk = <$type>::generate(&mut rng); + let sig = <$type>::sign(&sk, b"hello"); + assert!( + <$type>::verify_signature_raw(&sk.ref_to(), b"hello", &sig) + .is_ok() + ); + } + } + }; +} + +#[cfg(test)] +sigscheme_test! {ed25519_test, ed25519::SigScheme} +#[cfg(test)] +sigscheme_test! 
{secp256k1_test, secp256k1::SigScheme} + +#[cfg(test)] +mod more_tests { + use super::*; + + #[test] + fn zeroize_keypair_ed25519() { + use rand::thread_rng; + + let sk = ed25519::SigScheme::generate(&mut thread_rng()); + let sk_bytes = sk.0.as_bytes(); + let len = sk_bytes.len(); + let ptr = sk_bytes.as_ptr(); + + drop(sk); + + assert_eq!(&[0u8; 32], unsafe { + core::slice::from_raw_parts(ptr, len) + }); + } + + #[test] + fn zeroize_keypair_secp256k1() { + use rand::thread_rng; + + let mut sk = secp256k1::SigScheme::generate(&mut thread_rng()); + let sk_scalar = sk.0.to_scalar_ref(); + let len = sk_scalar.0.len(); + let ptr = sk_scalar.0.as_ref().as_ptr(); + + let original_data = sk_scalar.0; + + drop(sk); + + assert_ne!(&original_data, unsafe { + core::slice::from_raw_parts(ptr, len) + }); + } +} diff --git a/shared/src/types/key/secp256k1.rs b/core/src/types/key/secp256k1.rs similarity index 98% rename from shared/src/types/key/secp256k1.rs rename to core/src/types/key/secp256k1.rs index 733690387e6..ba622b9d7ef 100644 --- a/shared/src/types/key/secp256k1.rs +++ b/core/src/types/key/secp256k1.rs @@ -612,16 +612,16 @@ mod test { let expected_pk_hex = "a225bf565ff4ea039bccba3e26456e910cd74e4616d67ee0a166e26da6e5e55a08d0fa1659b4b547ba7139ca531f62907b9c2e72b80712f1c81ece43c33f4b8b"; let expected_eth_addr_hex = "6ea27154616a29708dce7650b475dd6b82eba6a3"; - let sk_bytes = hex::decode(SECRET_KEY_HEX).unwrap(); + let sk_bytes = HEXLOWER.decode(SECRET_KEY_HEX.as_bytes()).unwrap(); let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap(); let pk: PublicKey = sk.ref_to(); // We're removing the first byte with // `libsecp256k1::util::TAG_PUBKEY_FULL` - let pk_hex = hex::encode(&pk.0.serialize()[1..]); + let pk_hex = HEXLOWER.encode(&pk.0.serialize()[1..]); assert_eq!(expected_pk_hex, pk_hex); let eth_addr: EthAddress = (&pk).into(); - let eth_addr_hex = hex::encode(eth_addr.0); + let eth_addr_hex = HEXLOWER.encode(ð_addr.0[..]); assert_eq!(expected_eth_addr_hex, 
eth_addr_hex); } @@ -629,7 +629,7 @@ mod test { /// with Serde is idempotent. #[test] fn test_roundtrip_serde() { - let sk_bytes = hex::decode(SECRET_KEY_HEX).unwrap(); + let sk_bytes = HEXLOWER.decode(SECRET_KEY_HEX.as_bytes()).unwrap(); let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap(); let to_sign = "test".as_bytes(); let mut signature = SigScheme::sign(&sk, to_sign); @@ -644,7 +644,7 @@ mod test { /// with Borsh is idempotent. #[test] fn test_roundtrip_borsh() { - let sk_bytes = hex::decode(SECRET_KEY_HEX).unwrap(); + let sk_bytes = HEXLOWER.decode(SECRET_KEY_HEX.as_bytes()).unwrap(); let sk = SecretKey::try_from_slice(&sk_bytes[..]).unwrap(); let to_sign = "test".as_bytes(); let mut signature = SigScheme::sign(&sk, to_sign); diff --git a/shared/src/types/masp.rs b/core/src/types/masp.rs similarity index 100% rename from shared/src/types/masp.rs rename to core/src/types/masp.rs diff --git a/core/src/types/mod.rs b/core/src/types/mod.rs new file mode 100644 index 00000000000..ccae2b8c317 --- /dev/null +++ b/core/src/types/mod.rs @@ -0,0 +1,21 @@ +//! Types definitions. + +pub mod address; +pub mod chain; +pub mod eth_abi; +pub mod eth_bridge_pool; +pub mod ethereum_events; +pub mod governance; +pub mod hash; +pub mod ibc; +pub mod internal; +pub mod keccak; +pub mod key; +pub mod masp; +pub mod storage; +pub mod time; +pub mod token; +pub mod transaction; +pub mod validity_predicate; +pub mod vote_extensions; +pub mod voting_power; diff --git a/shared/src/types/named_address.rs b/core/src/types/named_address.rs similarity index 100% rename from shared/src/types/named_address.rs rename to core/src/types/named_address.rs diff --git a/shared/src/types/storage.rs b/core/src/types/storage.rs similarity index 94% rename from shared/src/types/storage.rs rename to core/src/types/storage.rs index 78cdf912a09..4a38eeccdc1 100644 --- a/shared/src/types/storage.rs +++ b/core/src/types/storage.rs @@ -1,29 +1,29 @@ //! 
Storage types use std::convert::{TryFrom, TryInto}; use std::fmt::Display; +use std::io::Write; use std::num::ParseIntError; use std::ops::{Add, Deref, Div, Mul, Rem, Sub}; use std::str::FromStr; use arse_merkle_tree::InternalKey; use bit_vec::BitVec; -use borsh::maybestd::io::Write; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use data_encoding::BASE32HEX_NOPAD; use ics23::CommitmentProof; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use thiserror::Error; -#[cfg(feature = "ferveo-tpke")] -use super::transaction::WrapperTx; use crate::bytes::ByteBuf; use crate::ledger::eth_bridge::storage::bridge_pool::BridgePoolProof; -use crate::ledger::storage::IBC_KEY_LIMIT; use crate::types::address::{self, Address}; use crate::types::hash::Hash; use crate::types::keccak::{KeccakHash, TryFromError}; use crate::types::time::DateTimeUtc; +/// The maximum size of an IBC key (in bytes) allowed in merkle-ized storage +pub const IBC_KEY_LIMIT: usize = 120; + #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { @@ -284,7 +284,7 @@ impl core::fmt::Debug for BlockHash { } /// The data from Tendermint header -/// relevant for Anoma storage +/// relevant for Namada storage #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] pub struct Header { /// Merkle root hash of block @@ -359,6 +359,13 @@ pub struct StringKey { pub length: usize, } +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum TreeKeyError { + #[error("Invalid key for merkle tree: {0}")] + InvalidMerkleKey(String), +} + impl Deref for StringKey { type Target = InternalKey; @@ -741,7 +748,7 @@ impl KeySeg for Epoch { fn parse(string: String) -> Result { string .split_once('=') - .and_then(|(prefix, epoch)| (prefix == "E").then(|| epoch)) + .and_then(|(prefix, epoch)| (prefix == "E").then_some(epoch)) .ok_or_else(|| { Error::ParseKeySeg(format!( "Invalid epoch prefix on key: {string}" @@ -924,6 +931,53 @@ impl Epoch { pub fn prev(&self) -> Self { Self(self.0 - 1) } + + /// 
Iterate a range of consecutive epochs starting from `self` of a given + /// length. Work-around for `Step` implementation pending on stabilization of . + pub fn iter_range(self, len: u64) -> impl Iterator + Clone { + let start_ix: u64 = self.into(); + let end_ix: u64 = start_ix + len; + (start_ix..end_ix).map(Epoch::from) + } + + /// Checked epoch subtraction. Computes self - rhs, returning None if + /// overflow occurred. + #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn checked_sub(self, rhs: Epoch) -> Option { + if rhs.0 > self.0 { + None + } else { + Some(Self(self.0 - rhs.0)) + } + } + + /// Checked epoch subtraction. Computes self - rhs, returning default + /// `Epoch(0)` if overflow occurred. + #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn sub_or_default(self, rhs: Epoch) -> Self { + self.checked_sub(rhs).unwrap_or_default() + } +} + +impl From for Epoch { + fn from(epoch: u64) -> Self { + Epoch(epoch) + } +} + +impl From for u64 { + fn from(epoch: Epoch) -> Self { + epoch.0 + } +} + +// TODO remove this once it's not being used +impl From for usize { + fn from(epoch: Epoch) -> Self { + epoch.0 as usize + } } impl Add for Epoch { @@ -934,6 +988,15 @@ impl Add for Epoch { } } +// TODO remove this once it's not being used +impl Add for Epoch { + type Output = Self; + + fn add(self, rhs: usize) -> Self::Output { + Epoch(self.0 + rhs as u64) + } +} + impl Sub for Epoch { type Output = Epoch; @@ -942,6 +1005,14 @@ impl Sub for Epoch { } } +impl Sub for Epoch { + type Output = Self; + + fn sub(self, rhs: Epoch) -> Self::Output { + Epoch(self.0 - rhs.0) + } +} + impl Mul for Epoch { type Output = Epoch; @@ -966,14 +1037,6 @@ impl Rem for Epoch { } } -impl Sub for Epoch { - type Output = Epoch; - - fn sub(self, rhs: Self) -> Self::Output { - Self(self.0 - rhs.0) - } -} - impl Add for Epoch { type Output = Epoch; @@ -990,18 +1053,6 @@ impl Mul for Epoch { } } 
-impl From for u64 { - fn from(epoch: Epoch) -> Self { - epoch.0 - } -} - -impl From for Epoch { - fn from(value: u64) -> Self { - Self(value) - } -} - /// Predecessor block epochs #[derive( Clone, @@ -1089,35 +1140,6 @@ impl Epochs { } } -#[cfg(feature = "ferveo-tpke")] -#[derive(Default, Debug, Clone, BorshDeserialize, BorshSerialize)] -/// Wrapper txs to be decrypted in the next block proposal -pub struct TxQueue(std::collections::VecDeque); - -#[cfg(feature = "ferveo-tpke")] -impl TxQueue { - /// Add a new wrapper at the back of the queue - pub fn push(&mut self, wrapper: WrapperTx) { - self.0.push_back(wrapper); - } - - /// Remove the wrapper at the head of the queue - pub fn pop(&mut self) -> Option { - self.0.pop_front() - } - - /// Get an iterator over the queue - pub fn iter(&self) -> impl std::iter::Iterator { - self.0.iter() - } - - /// Check if there are any txs in the queue - #[allow(dead_code)] - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - /// A value of a storage prefix iterator. #[derive(Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema)] pub struct PrefixValue { diff --git a/shared/src/types/time.rs b/core/src/types/time.rs similarity index 89% rename from shared/src/types/time.rs rename to core/src/types/time.rs index dfca614c822..a508501d941 100644 --- a/shared/src/types/time.rs +++ b/core/src/types/time.rs @@ -7,10 +7,6 @@ use std::ops::{Add, Sub}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; pub use chrono::{DateTime, Duration, TimeZone, Utc}; -use crate::tendermint::time::Time; -use crate::tendermint::Error as TendermintError; -use crate::tendermint_proto::google::protobuf; - /// Check if the given `duration` has passed since the given `start. 
pub fn duration_passed( current: DateTimeUtc, @@ -198,10 +194,15 @@ impl From for prost_types::Timestamp { } } -impl TryFrom for DateTimeUtc { +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl TryFrom + for DateTimeUtc +{ type Error = prost_types::TimestampOutOfSystemRangeError; - fn try_from(timestamp: protobuf::Timestamp) -> Result { + fn try_from( + timestamp: crate::tendermint_proto::google::protobuf::Timestamp, + ) -> Result { Self::try_from(prost_types::Timestamp { seconds: timestamp.seconds, nanos: timestamp.nanos, @@ -230,18 +231,20 @@ impl From for Rfc3339String { } } -impl TryFrom for Time { - type Error = TendermintError; +#[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] +impl TryFrom for crate::tendermint::time::Time { + type Error = crate::tendermint::Error; fn try_from(dt: DateTimeUtc) -> Result { Self::parse_from_rfc3339(&DateTime::to_rfc3339(&dt.0)) } } -impl TryFrom