diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 03fa720d5..38c36dcb9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,14 +11,14 @@ on: - ".github/workflows/ci.yml" - "!sdk/**" - pull_request_target: - types: [opened, synchronize] - paths: - - "**/*.rs" - - "**/Cargo.toml" - - "**/Cargo.lock" - - ".github/workflows/ci.yml" - - "!sdk/**" + pull_request_target: + types: [opened, synchronize] + paths: + - "**/*.rs" + - "**/Cargo.toml" + - "**/Cargo.lock" + - ".github/workflows/ci.yml" + - "!sdk/**" # pull_request: # branches: @@ -28,6 +28,7 @@ on: # - "**/Cargo.toml" # - "**/Cargo.lock" # - ".github/workflows/ci.yml" + # - "!sdk/**" concurrency: group: ci-${{ github.head_ref || github.ref_name }} @@ -51,6 +52,7 @@ env: POLYGON_EXECUTION_RPC: ${{secrets.POLYGON_EXECUTION_RPC}} SEI_RPC_URL: ${{secrets.SEI_RPC_URL}} KAVA_RPC_URL: ${{secrets.KAVA_RPC_URL}} + PHAROS_ATLANTIC_RPC: ${{secrets.PHAROS_ATLANTIC_RPC}} jobs: check-wasm: @@ -766,3 +768,86 @@ jobs: - name: Run Tendermint Tests run: | RUST_LOG=tendermint_prover=trace,tesseract=trace,tendermint_verifier=trace cargo test -p tendermint-prover -- --nocapture --ignored + + pharos-tests: + name: Pharos Tests + runs-on: ubuntu-22.04 + if: github.event.pull_request.draft != true + steps: + - name: Get User Permission + id: checkAccess + uses: actions-cool/check-user-permission@v2 + with: + require: write + username: ${{ github.triggering_actor }} + + - name: Check User Permission + if: steps.checkAccess.outputs.require-result == 'false' + run: | + echo "${{ github.triggering_actor }} does not have permissions on this repo." 
+ echo "Current permission level is ${{ steps.checkAccess.outputs.user-permission }}" + echo "Job originally triggered by ${{ github.actor }}" + exit 1 + + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} + token: ${{ secrets.GH_TOKEN }} + submodules: recursive + + - name: Install toolchain + uses: dtolnay/rust-toolchain@nightly + with: + toolchain: stable + + - name: Add wasm toolchain + run: | + rustup update nightly + rustup target add wasm32-unknown-unknown --toolchain nightly + rustup target add wasm32-unknown-unknown + rustup component add rust-src + + - name: Install Dependencies + run: | + sudo apt-get update + sudo apt-get install -y clang netcat wget curl libssl-dev llvm libclang-dev libudev-dev make libprotobuf-dev protobuf-compiler pkg-config + echo "LIBCLANG_PATH=/usr/lib/llvm-14/lib" >> $GITHUB_ENV + + - uses: pnpm/action-setup@v4 + with: + version: 10 + + - name: Set up Node + uses: actions/setup-node@v3 + with: + node-version: 22 + cache-dependency-path: "evm/pnpm-lock.yaml" + cache: "pnpm" + + - name: Install npm dependencies + working-directory: evm + run: | + pnpm install + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly + + - name: Build Foundry artifacts + working-directory: evm + run: forge build + + - uses: webfactory/ssh-agent@v0.7.0 + with: + ssh-private-key: ${{ secrets.SSH_KEY }} + + - uses: Swatinem/rust-cache@v2 + + - name: Run Pharos unit tests + run: | + cargo test -p pharos-primitives -p pharos-verifier -- --nocapture + + - name: Run Pharos integration tests + run: | + PHAROS_ATLANTIC_RPC=${{ secrets.PHAROS_ATLANTIC_RPC }} cargo test -p pallet-ismp-testsuite "pharos" -- --nocapture --ignored diff --git a/Cargo.lock b/Cargo.lock index 89602ed51..b32d4af86 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3521,8 +3521,8 @@ dependencies = [ "alloy-rlp", "alloy-rlp-derive", "anyhow", - "ark-ec 0.4.2", "bls_on_arkworks", + "crypto-utils", "geth-primitives", 
"ismp", "log", @@ -3531,7 +3531,6 @@ dependencies = [ "primitive-types 0.13.1", "ssz-rs", "sync-committee-primitives", - "sync-committee-verifier", ] [[package]] @@ -4757,10 +4756,15 @@ name = "crypto-utils" version = "0.2.0" dependencies = [ "anyhow", + "bls_on_arkworks", + "hex", "parity-scale-codec", "scale-info", + "serde", + "serde-hex-utils", "sp-core", "sp-io", + "ssz-rs", ] [[package]] @@ -8183,6 +8187,7 @@ dependencies = [ "ismp-optimism", "ismp-parachain", "ismp-parachain-runtime-api", + "ismp-pharos", "ismp-polygon", "ismp-sync-committee", "ismp-tendermint", @@ -10443,6 +10448,25 @@ dependencies = [ "polkadot-sdk", ] +[[package]] +name = "ismp-pharos" +version = "0.1.0" +dependencies = [ + "anyhow", + "geth-primitives", + "ismp", + "log", + "pallet-ismp", + "pallet-ismp-host-executive", + "parity-scale-codec", + "pharos-primitives", + "pharos-state-machine", + "pharos-verifier", + "polkadot-sdk", + "scale-info", + "sync-committee-primitives", +] + [[package]] name = "ismp-polygon" version = "0.1.0" @@ -15213,18 +15237,22 @@ dependencies = [ "alloy-primitives 1.5.7", "alloy-sol-types 1.5.7", "anyhow", + "bls_on_arkworks", "ckb-merkle-mountain-range", + "crypto-utils", "dotenv", "env_logger 0.11.9", "ethereum-triedb", "evm-state-machine", "futures", + "geth-primitives", "hash-db", "hex", "hyperbridge-client-machine", "ismp", "ismp-bsc", "ismp-grandpa", + "ismp-pharos", "ismp-solidity-abi", "ismp-sync-committee", "ismp-testsuite", @@ -15247,6 +15275,8 @@ dependencies = [ "pallet-token-governor", "pallet-xcm-gateway", "parity-scale-codec", + "pharos-primitives", + "pharos-prover", "polkadot-sdk", "primitive-types 0.13.1", "reqwest 0.11.27", @@ -17092,6 +17122,77 @@ dependencies = [ "indexmap 2.13.0", ] +[[package]] +name = "pharos-primitives" +version = "0.1.0" +dependencies = [ + "alloy-primitives 1.5.7", + "anyhow", + "crypto-utils", + "geth-primitives", + "hex-literal 0.4.1", + "ismp", + "parity-scale-codec", + "primitive-types 0.13.1", + "serde", + 
"sp-io", + "thiserror 2.0.18", +] + +[[package]] +name = "pharos-prover" +version = "0.1.0" +dependencies = [ + "alloy-eips", + "alloy-provider", + "ethabi", + "geth-primitives", + "hex", + "pharos-primitives", + "pharos-verifier", + "primitive-types 0.13.1", + "reqwest 0.11.27", + "serde", + "serde_json", + "sp-core", + "thiserror 2.0.18", +] + +[[package]] +name = "pharos-state-machine" +version = "0.1.0" +dependencies = [ + "alloy-rlp", + "anyhow", + "ethabi", + "evm-state-machine", + "geth-primitives", + "hex-literal 0.4.1", + "ismp", + "pallet-ismp-host-executive", + "parity-scale-codec", + "pharos-primitives", + "polkadot-sdk", + "primitive-types 0.13.1", +] + +[[package]] +name = "pharos-verifier" +version = "0.1.0" +dependencies = [ + "anyhow", + "bls_on_arkworks", + "crypto-utils", + "geth-primitives", + "hex", + "hex-literal 0.4.1", + "ismp", + "log", + "pharos-primitives", + "primitive-types 0.13.1", + "thiserror 2.0.18", +] + [[package]] name = "phf" version = "0.11.3" @@ -20408,6 +20509,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", + "webpki-roots 0.25.4", "winreg", ] @@ -27414,6 +27516,7 @@ name = "sync-committee-primitives" version = "0.1.1" dependencies = [ "anyhow", + "crypto-utils", "hex", "hex-literal 0.4.1", "parity-scale-codec", @@ -27948,6 +28051,7 @@ dependencies = [ "tendermint-primitives", "tesseract-evm", "tesseract-evm-tendermint", + "tesseract-pharos-evm", "tesseract-primitives", "tesseract-substrate", "tesseract-substrate-evm", @@ -28142,6 +28246,7 @@ dependencies = [ "hex-literal 0.4.1", "ismp", "ismp-grandpa", + "ismp-pharos", "ismp-solidity-abi", "log", "mmr-primitives", @@ -28150,6 +28255,7 @@ dependencies = [ "pallet-ismp-demo", "pallet-ismp-rpc", "parity-scale-codec", + "pharos-primitives", "polkadot-sdk", "primitive-types 0.12.2", "sp-core", @@ -28164,6 +28270,7 @@ dependencies = [ "tesseract-evm", "tesseract-grandpa", "tesseract-messaging", + "tesseract-pharos", "tesseract-polygon", 
"tesseract-primitives", "tesseract-substrate", @@ -28196,6 +28303,49 @@ dependencies = [ "transaction-fees", ] +[[package]] +name = "tesseract-pharos" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "geth-primitives", + "ismp", + "ismp-pharos", + "log", + "parity-scale-codec", + "pharos-primitives", + "pharos-prover", + "pharos-verifier", + "primitive-types 0.13.1", + "serde", + "serde_json", + "sp-core", + "tesseract-evm", + "tesseract-primitives", + "tokio", +] + +[[package]] +name = "tesseract-pharos-evm" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "ismp", + "log", + "pallet-ismp-host-executive", + "parity-scale-codec", + "pharos-primitives", + "pharos-prover", + "pharos-state-machine", + "primitive-types 0.13.1", + "serde", + "sp-core", + "tesseract-evm", + "tesseract-primitives", +] + [[package]] name = "tesseract-polygon" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index a44a3826a..1f0af96dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,7 @@ members = [ "modules/ismp/clients/arbitrum", "modules/ismp/clients/optimism", "modules/ismp/clients/bsc", + "modules/ismp/clients/pharos", "modules/ismp/clients/grandpa", "modules/ismp/testsuite", "modules/ismp/clients/ismp-arbitrum", @@ -60,6 +61,9 @@ members = [ "modules/consensus/tendermint/prover", "modules/consensus/tendermint/primitives", "modules/consensus/tendermint/ics23-primitives", + "modules/consensus/pharos/primitives", + "modules/consensus/pharos/verifier", + "modules/consensus/pharos/prover", "modules/trees/ethereum", "modules/pallets/mmr", "modules/pallets/mmr/primitives", @@ -69,6 +73,7 @@ members = [ "modules/ismp/state-machines/evm", "modules/ismp/state-machines/substrate", "modules/ismp/state-machines/hyperbridge", + "modules/ismp/state-machines/pharos", "modules/pallets/consensus-incentives", "modules/pallets/messaging-fees", @@ -99,6 +104,7 @@ members = [ "tesseract/messaging/tron", "tesseract/messaging/evm-tendermint", 
"tesseract/messaging/substrate-evm", + "tesseract/messaging/pharos-evm", "tesseract/messaging/fees", "tesseract/messaging/fees/prisma-cli", "tesseract/messaging/telemetry", @@ -118,6 +124,7 @@ members = [ "tesseract/consensus/relayer", "tesseract/consensus/polygon", "tesseract/consensus/tendermint", + "tesseract/consensus/pharos", # Airdrop @@ -190,7 +197,7 @@ alloy-rlp-derive = "0.3.13" alloy-sol-macro = { version = "1.5.7", features = ["json"] } alloy-sol-types = { version = "1.5.7", default-features = false } alloy = { version = "1.7.3", default-features = false } -alloy-provider = { version = "1.7.3", default-features = false } +alloy-provider = { version = "1.7.3", default-features = false, features = ["reqwest", "reqwest-default-tls"] } alloy-transport = { version = "1.7.3", default-features = false } alloy-transport-http = { version = "1.7.3", default-features = false } alloy-signer = { version = "1.7.3", default-features = false } @@ -280,9 +287,13 @@ tendermint-verifier = { path = "./modules/consensus/tendermint/verifier", defaul tendermint-primitives = { path = "./modules/consensus/tendermint/primitives", default-features = false } tendermint-prover = { path = "./modules/consensus/tendermint/prover", default-features = false } tendermint-ics23-primitives = { path = "./modules/consensus/tendermint/ics23-primitives", default-features = false } +pharos-primitives = { path = "./modules/consensus/pharos/primitives", default-features = false } +pharos-verifier = { path = "./modules/consensus/pharos/verifier", default-features = false } +pharos-prover = { path = "./modules/consensus/pharos/prover", default-features = false } # consensus clients ismp-bsc = { path = "./modules/ismp/clients/bsc", default-features = false } +ismp-pharos = { path = "./modules/ismp/clients/pharos", default-features = false } ismp-sync-committee = { path = "./modules/ismp/clients/sync-committee", default-features = false } arbitrum-verifier = { path = "./modules/ismp/clients/arbitrum", 
default-features = false } op-verifier = { path = "./modules/ismp/clients/optimism", default-features = false } @@ -293,6 +304,7 @@ ismp-tendermint = { path = "modules/ismp/clients/tendermint", default-features = # state machine clients evm-state-machine = { path = "./modules/ismp/state-machines/evm", default-features = false } +pharos-state-machine = { path = "./modules/ismp/state-machines/pharos", default-features = false } hyperbridge-client-machine = { path = "modules/ismp/state-machines/hyperbridge", default-features = false } # ismp modules @@ -330,6 +342,7 @@ tesseract-messaging = { path = "tesseract/messaging/messaging" } tesseract-fisherman = { path = "tesseract/messaging/fisherman" } tesseract-substrate = { path = "tesseract/messaging/substrate" } tesseract-substrate-evm = { path = "tesseract/messaging/substrate-evm" } +tesseract-pharos-evm = { path = "tesseract/messaging/pharos-evm" } tesseract-evm = { path = "tesseract/messaging/evm" } tesseract-tron = { path = "tesseract/messaging/tron" } tesseract-evm-tendermint = { path = "tesseract/messaging/evm-tendermint" } @@ -350,6 +363,7 @@ tesseract-grandpa = { path = "tesseract/consensus/grandpa" } tesseract-consensus = { path = "tesseract/consensus/relayer" } tesseract-polygon = { path = "tesseract/consensus/polygon" } tesseract-tendermint = { path = "tesseract/consensus/tendermint" } +tesseract-pharos = { path = "tesseract/consensus/pharos" } [workspace.dependencies.codec] diff --git a/modules/consensus/beefy/prover/src/fiat_shamir.rs b/modules/consensus/beefy/prover/src/fiat_shamir.rs index ddb3d63f0..ff49e4753 100644 --- a/modules/consensus/beefy/prover/src/fiat_shamir.rs +++ b/modules/consensus/beefy/prover/src/fiat_shamir.rs @@ -351,8 +351,7 @@ pub fn filter_signatures_for_challenge( let last = temp.last_mut().unwrap(); *last += 27; - filtered - .push(SignatureWithAuthorityIndex { index: authority_index, signature: temp }); + filtered.push(SignatureWithAuthorityIndex { index: authority_index, signature: 
temp }); } Ok(filtered) diff --git a/modules/consensus/bsc/prover/src/test.rs b/modules/consensus/bsc/prover/src/test.rs index 8dafb3fc3..ce791afa2 100644 --- a/modules/consensus/bsc/prover/src/test.rs +++ b/modules/consensus/bsc/prover/src/test.rs @@ -132,7 +132,7 @@ async fn verify_bsc_pos_headers() { // Skip blocks without enough participants (2/3 + 1 threshold) let participant_count = validators_bit_set.iter().as_bitslice().count_ones(); - let required_participants = (2 * validator_set_for_check.len() / 3); + let required_participants = 2 * validator_set_for_check.len() / 3; if participant_count < required_participants { println!( "Not enough participants in bsc update for block {} ({}/{}), skipping", diff --git a/modules/consensus/bsc/verifier/Cargo.toml b/modules/consensus/bsc/verifier/Cargo.toml index b00886edc..43942c21c 100644 --- a/modules/consensus/bsc/verifier/Cargo.toml +++ b/modules/consensus/bsc/verifier/Cargo.toml @@ -17,10 +17,9 @@ primitive-types = { workspace = true } codec = { workspace = true } ismp = { workspace = true, default-features = false } geth-primitives = { workspace = true, default-features = false } -sync-committee-verifier = { workspace = true, default-features = false } +crypto-utils = { workspace = true, default-features = false } sync-committee-primitives = { workspace = true, default-features = false } bls = { workspace = true } -ark-ec = { workspace = true } ssz-rs = { git = "https://github.com/polytope-labs/ssz-rs", branch = "main", default-features = false } [dependencies.polkadot-sdk] @@ -38,9 +37,8 @@ std = [ "alloy-primitives/std", "alloy-rlp/std", "bls/std", - "sync-committee-verifier/std", + "crypto-utils/std", "sync-committee-primitives/std", "geth-primitives/std", - "ark-ec/std", "ssz-rs/std", ] diff --git a/modules/consensus/bsc/verifier/src/lib.rs b/modules/consensus/bsc/verifier/src/lib.rs index 0e382b6c6..e54ddfcaa 100644 --- a/modules/consensus/bsc/verifier/src/lib.rs +++ 
b/modules/consensus/bsc/verifier/src/lib.rs @@ -20,14 +20,13 @@ use polkadot_sdk::*; use alloc::vec::Vec; use anyhow::anyhow; -use bls::{point_to_pubkey, types::G1ProjectivePoint}; +use crypto_utils::aggregate_public_keys; use geth_primitives::{CodecHeader, Header}; use ismp::messaging::Keccak256; use primitives::{parse_extra, BscClientUpdate, Config, VALIDATOR_BIT_SET_SIZE}; use sp_core::H256; use ssz_rs::{Bitvector, Deserialize}; use sync_committee_primitives::constants::BlsPublicKey; -use sync_committee_verifier::crypto::pubkey_to_projective; pub mod primitives; @@ -164,12 +163,3 @@ pub fn verify_bsc_header( next_validators: next_validator_addresses, }) } - -pub fn aggregate_public_keys(keys: &[BlsPublicKey]) -> Vec { - let aggregate = keys - .into_iter() - .filter_map(|key| pubkey_to_projective(key).ok()) - .fold(G1ProjectivePoint::default(), |acc, next| acc + next); - - point_to_pubkey(aggregate.into()) -} diff --git a/modules/consensus/pharos/primitives/Cargo.toml b/modules/consensus/pharos/primitives/Cargo.toml new file mode 100644 index 000000000..1b6f26514 --- /dev/null +++ b/modules/consensus/pharos/primitives/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "pharos-primitives" +version = "0.1.0" +edition = "2021" +description = "Primitive types for Pharos consensus verifier" +authors = ["Polytope Labs "] +publish = false + +[dependencies] +codec = { workspace = true, features = ["derive"] } +alloy-primitives = { workspace = true } +crypto-utils = { workspace = true, default-features = false } +primitive-types = { workspace = true, features = ["serde_no_std", "impl-codec"] } +hex-literal = { workspace = true } +serde = { workspace = true, optional = true, features = ["derive"] } +sp-io = { workspace = true, default-features = false } +anyhow = { workspace = true, default-features = false } +thiserror = { workspace = true, default-features = false } +geth-primitives = { workspace = true, default-features = false } +ismp = { workspace = true, default-features 
= false } + +[features] +default = ["std"] +std = [ + "codec/std", + "alloy-primitives/std", + "crypto-utils/std", + "primitive-types/std", + "anyhow/std", + "sp-io/std", + "serde", + "geth-primitives/std", + "ismp/std", + "thiserror/std", +] diff --git a/modules/consensus/pharos/primitives/src/constants.rs b/modules/consensus/pharos/primitives/src/constants.rs new file mode 100644 index 000000000..0de429530 --- /dev/null +++ b/modules/consensus/pharos/primitives/src/constants.rs @@ -0,0 +1,121 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Constants and configuration for Pharos consensus. + +use alloy_primitives::Address; + +/// Re-export BLS types from crypto-utils +pub use crypto_utils::{ + BlsPublicKey, BlsSignature, BLS_PUBLIC_KEY_BYTES_LEN, BLS_SIGNATURE_BYTES_LEN, +}; + +/// The staking contract address where validator set is stored. +/// Address: 0x4100000000000000000000000000000000000000 +pub const STAKING_CONTRACT_ADDRESS: Address = + Address::new([0x41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + +/// Consensus ID for Pharos network +pub const PHAROS_CONSENSUS_ID: [u8; 4] = *b"PHAR"; + +/// Mainnet epoch length in seconds (4 hours) +pub const MAINNET_EPOCH_LENGTH_SECS: u64 = 4 * 60 * 60; // 14400 seconds + +/// Testnet (Atlantic) epoch length in seconds. 
+pub const TESTNET_EPOCH_LENGTH_SECS: u64 = 7828; + +/// Pharos Mainnet chain ID +pub const PHAROS_MAINNET_CHAIN_ID: u32 = 688600; + +/// Pharos Atlantic Testnet chain ID +pub const PHAROS_ATLANTIC_CHAIN_ID: u32 = 688689; + +/// Default withdraw window in epochs from the Pharos staking contract. +pub const DEFAULT_WITHDRAW_WINDOW_EPOCHS: u64 = 84; + +/// Configuration trait for Pharos network parameters. +pub trait Config: Clone + Send + Sync { + /// The epoch length in seconds + const EPOCH_LENGTH_SECS: u64; + + /// The epoch length in blocks (derived from epoch length and block time) + const EPOCH_LENGTH_BLOCKS: u64; + + /// The chain ID for this network + const CHAIN_ID: u64; + + /// Network identifier + const ID: [u8; 4]; + + /// The unstaking period in seconds (withdraw_window_epochs × epoch_length_secs). + /// Defaults to `DEFAULT_WITHDRAW_WINDOW_EPOCHS × EPOCH_LENGTH_SECS`. + const UNBONDING_PERIOD: u64 = DEFAULT_WITHDRAW_WINDOW_EPOCHS * Self::EPOCH_LENGTH_SECS; + + /// Calculate the epoch number for a given block number + fn compute_epoch(block_number: u64) -> u64 { + block_number / Self::EPOCH_LENGTH_BLOCKS + } + + /// Check if a block is an epoch boundary block (last block of an epoch). + /// + /// The epoch boundary is defined as the last block of an epoch, i.e., + /// `(block_number + 1) % epoch_length == 0`. 
+ /// + /// At epoch boundaries, the validator set for the next epoch is finalized + fn is_epoch_boundary(block_number: u64) -> bool { + (block_number + 1) % Self::EPOCH_LENGTH_BLOCKS == 0 + } + + /// Get the first block number of the next epoch + fn next_epoch_start(current_block: u64) -> u64 { + let current_epoch = Self::compute_epoch(current_block); + (current_epoch + 1) * Self::EPOCH_LENGTH_BLOCKS + } +} + +/// Pharos Mainnet configuration +#[derive(Clone, Default, Debug)] +pub struct Mainnet; + +impl Config for Mainnet { + /// 4 hours epoch length + const EPOCH_LENGTH_SECS: u64 = MAINNET_EPOCH_LENGTH_SECS; + + /// With ~1 second finality (sub-second), assuming 1 block per second + /// 4 hours = 14400 blocks + const EPOCH_LENGTH_BLOCKS: u64 = 14400; + + /// Mainnet chain ID - TBD + /// Placeholder based on testnet pattern + const CHAIN_ID: u64 = 688600; + + const ID: [u8; 4] = PHAROS_CONSENSUS_ID; +} + +/// Pharos Testnet configuration +#[derive(Clone, Default, Debug)] +pub struct Testnet; + +impl Config for Testnet { + /// ~93.8 minutes epoch length + const EPOCH_LENGTH_SECS: u64 = TESTNET_EPOCH_LENGTH_SECS; + + const EPOCH_LENGTH_BLOCKS: u64 = 7828; + + /// Pharos Testnet chain ID + const CHAIN_ID: u64 = 688689; + + const ID: [u8; 4] = PHAROS_CONSENSUS_ID; +} diff --git a/modules/consensus/pharos/primitives/src/lib.rs b/modules/consensus/pharos/primitives/src/lib.rs new file mode 100644 index 000000000..0972de5e1 --- /dev/null +++ b/modules/consensus/pharos/primitives/src/lib.rs @@ -0,0 +1,24 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +pub mod constants; +pub mod spv; +pub mod types; + +pub use constants::*; +pub use types::*; diff --git a/modules/consensus/pharos/primitives/src/spv.rs b/modules/consensus/pharos/primitives/src/spv.rs new file mode 100644 index 000000000..b23ef65ab --- /dev/null +++ b/modules/consensus/pharos/primitives/src/spv.rs @@ -0,0 +1,589 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Pharos hexary hash tree SPV proof verification. +//! +//! Node types: MSU Root (8192 bytes), Internal (515 bytes), Leaf (65 bytes). +//! Internal nodes use SkipEmpty hashing: `sha256(header || non-zero child slots)`. + +use alloc::vec::Vec; + +use crate::types::{PharosProofNode, SiblingLeftmostLeafProof}; + +/// Errors returned by SPV proof verification. 
+#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("Proof contains no nodes")] + EmptyProof, + #[error("Terminal proof node is not a valid leaf")] + InvalidLeaf, + #[error("Leaf key hash does not match the expected key")] + KeyMismatch, + #[error("Leaf value hash does not match the expected value")] + ValueMismatch, + #[error("Proof node has an unrecognized length")] + InvalidNodeLength, + #[error("Child hash in parent does not match computed child hash")] + HashChainBroken, + #[error("Slot offset is out of bounds for the parent node")] + SlotOutOfBounds, + #[error("Computed root hash does not match the expected root")] + RootMismatch, + #[error("Terminal node is not a valid internal node")] + InvalidTerminalNode, + #[error("Target nibble slot is not empty")] + TargetSlotNotEmpty, + #[error("Key exists in the trie")] + KeyExists, + #[error("Sibling proof count does not match non-empty slot count")] + SiblingCountMismatch, + #[error("Sibling proof references an invalid or disallowed slot index")] + InvalidSiblingSlot, + #[error("Duplicate sibling proof for the same slot")] + DuplicateSiblingSlot, + #[error("Sibling proof has an empty proof path")] + EmptySiblingPath, + #[error("Sibling leaf nibble does not route through declared slot index")] + SiblingNibbleMismatch, + #[error("Sibling proof failed verification: {0}")] + SiblingProofInvalid(alloc::boxed::Box), +} + +const INTERNAL_NODE_HEADER: usize = 3; +const INTERNAL_NODE_SLOTS: usize = 16; +const INTERNAL_NODE_SLOT_SIZE: usize = 32; +const INTERNAL_NODE_LEN: usize = + INTERNAL_NODE_HEADER + INTERNAL_NODE_SLOTS * INTERNAL_NODE_SLOT_SIZE; +const MSU_ROOT_NODE_LEN: usize = 256 * INTERNAL_NODE_SLOT_SIZE; +const LEAF_NODE_LEN: usize = 65; +const LEAF_NODE_TYPE: u8 = 1; +const ZERO_HASH: [u8; 32] = [0u8; 32]; + +pub fn sha256(data: &[u8]) -> [u8; 32] { + sp_io::hashing::sha2_256(data) +} + +/// Pharos nibble extraction: low nibble first at even depths, high nibble at odd depths. 
+pub fn nibble_at_depth(key_hash: &[u8], depth: usize) -> u8 { + let byte_index = depth / 2; + if depth % 2 == 0 { + key_hash[byte_index] & 0x0F + } else { + (key_hash[byte_index] >> 4) & 0x0F + } +} + +fn is_zero_slot(slot: &[u8]) -> bool { + slot == ZERO_HASH +} + +/// SkipEmpty: `sha256(3-byte header || non-zero slots)`. All-zero node hashes to `[0; 32]`. +fn hash_internal_node(proof_node: &[u8]) -> [u8; 32] { + let mut data = Vec::with_capacity(INTERNAL_NODE_LEN); + data.extend_from_slice(&proof_node[..INTERNAL_NODE_HEADER]); + + for i in 0..INTERNAL_NODE_SLOTS { + let start = INTERNAL_NODE_HEADER + i * INTERNAL_NODE_SLOT_SIZE; + let slot = &proof_node[start..start + INTERNAL_NODE_SLOT_SIZE]; + if !is_zero_slot(slot) { + data.extend_from_slice(slot); + } + } + + if data.len() == INTERNAL_NODE_HEADER { + ZERO_HASH + } else { + sha256(&data) + } +} + +fn compute_node_hash(proof_node: &[u8]) -> Option<[u8; 32]> { + match proof_node.len() { + LEAF_NODE_LEN => Some(sha256(proof_node)), + INTERNAL_NODE_LEN => Some(hash_internal_node(proof_node)), + MSU_ROOT_NODE_LEN => Some(sha256(proof_node)), + _ => None, + } +} + +fn is_leaf(node: &[u8]) -> bool { + node.len() == LEAF_NODE_LEN && node[0] == LEAF_NODE_TYPE +} + +/// Bottom-up hash chain walk from last node to root. +/// +/// Uses `nibble_at_depth(sha256(key))` to locate child slots in internal nodes +/// (index > 0), ensuring the proof path follows the key's trie path. The MSU root +/// (index 0) uses `next_begin_offset` because its 256-slot addressing scheme is +/// Pharos-specific and opaque; the MSU root content is pinned to the state root +/// via its hash, so an attacker cannot substitute a different MSU root. 
+fn verify_proof_walk( + proof_nodes: &[PharosProofNode], + key: &[u8], + root: &[u8; 32], +) -> Result<(), Error> { + let last = proof_nodes.last().ok_or(Error::EmptyProof)?; + let mut current_hash = compute_node_hash(&last.proof_node).ok_or(Error::InvalidNodeLength)?; + let key_hash = sha256(key); + + for i in (0..proof_nodes.len()).rev().skip(1) { + let parent = &proof_nodes[i]; + + let start = if i == 0 { + parent.next_begin_offset as usize + } else { + let trie_depth = i - 1; + let nibble = nibble_at_depth(&key_hash, trie_depth) as usize; + INTERNAL_NODE_HEADER + nibble * INTERNAL_NODE_SLOT_SIZE + }; + + let slot = parent + .proof_node + .get(start..start + INTERNAL_NODE_SLOT_SIZE) + .ok_or(Error::SlotOutOfBounds)?; + + if slot != current_hash { + return Err(Error::HashChainBroken); + } + + current_hash = compute_node_hash(&parent.proof_node).ok_or(Error::InvalidNodeLength)?; + } + + if current_hash == *root { + Ok(()) + } else { + Err(Error::RootMismatch) + } +} + +/// Builds a storage trie key by concatenating address and slot hash. +pub fn build_storage_key(address: &[u8; 20], slot_hash: &[u8; 32]) -> [u8; 52] { + let mut key = [0u8; 52]; + key[..20].copy_from_slice(address); + key[20..].copy_from_slice(slot_hash); + key +} + +/// Verify that a key-value pair exists in the trie. +pub fn verify_proof( + proof_nodes: &[PharosProofNode], + key: &[u8], + value: &[u8], + root: &[u8; 32], +) -> Result<(), Error> { + let last = proof_nodes.last().ok_or(Error::EmptyProof)?; + + if !is_leaf(&last.proof_node) { + return Err(Error::InvalidLeaf); + } + + if last.proof_node[1..33] != sha256(key) { + return Err(Error::KeyMismatch); + } + + if last.proof_node[33..65] != sha256(value) { + return Err(Error::ValueMismatch); + } + + verify_proof_walk(proof_nodes, key, root) +} + +/// Verify that a key exists in the trie (inclusion proof). +/// Returns the value hash from the leaf on success. 
+pub fn verify_membership_proof( + proof_nodes: &[PharosProofNode], + key: &[u8], + root: &[u8; 32], +) -> Result<[u8; 32], Error> { + let last = proof_nodes.last().ok_or(Error::EmptyProof)?; + + if !is_leaf(&last.proof_node) { + return Err(Error::InvalidLeaf); + } + + if last.proof_node[1..33] != sha256(key) { + return Err(Error::KeyMismatch); + } + + let mut value_hash = [0u8; 32]; + value_hash.copy_from_slice(&last.proof_node[33..65]); + + verify_proof_walk(proof_nodes, key, root)?; + Ok(value_hash) +} + +/// Verify that a key does NOT exist in the trie (non-inclusion proof). +/// +/// Case 1: Proof ends at a leaf with a different key_hash (path collision). +/// Case 2: Proof ends at an internal node where the target nibble slot is empty. +/// Sibling proofs pin the non-empty slots to the same root, preventing forgery. +pub fn verify_non_existence_proof( + proof_nodes: &[PharosProofNode], + key: &[u8], + root: &[u8; 32], + sibling_proofs: &[SiblingLeftmostLeafProof], +) -> Result<(), Error> { + let last_node = proof_nodes.last().ok_or(Error::EmptyProof)?; + let last = &last_node.proof_node; + let key_hash = sha256(key); + + // Case 1: leaf with different key + if is_leaf(last) { + if last[1..33] == key_hash { + return Err(Error::KeyExists); + } + return verify_proof_walk(proof_nodes, key, root); + } + + // Case 2: internal node with empty target slot + if last.len() != INTERNAL_NODE_LEN { + return Err(Error::InvalidTerminalNode); + } + + let depth = proof_nodes.len().saturating_sub(2); + let nibble = nibble_at_depth(&key_hash, depth) as usize; + let slot_start = INTERNAL_NODE_HEADER + nibble * INTERNAL_NODE_SLOT_SIZE; + + if slot_start + INTERNAL_NODE_SLOT_SIZE > last.len() { + return Err(Error::SlotOutOfBounds); + } + + if !is_zero_slot(&last[slot_start..slot_start + INTERNAL_NODE_SLOT_SIZE]) { + return Err(Error::TargetSlotNotEmpty); + } + + verify_proof_walk(proof_nodes, key, root)?; + + let non_empty_count = (0..INTERNAL_NODE_SLOTS) + .filter(|&i| { + i != 
nibble && { + let s = INTERNAL_NODE_HEADER + i * INTERNAL_NODE_SLOT_SIZE; + !is_zero_slot(&last[s..s + INTERNAL_NODE_SLOT_SIZE]) + } + }) + .count(); + + if non_empty_count > 0 { + if sibling_proofs.len() != non_empty_count { + return Err(Error::SiblingCountMismatch); + } + + let parent_nodes = &proof_nodes[..proof_nodes.len() - 1]; + let mut proven_slots = [false; INTERNAL_NODE_SLOTS]; + + for sib in sibling_proofs { + let idx = sib.slot_index as usize; + if idx >= INTERNAL_NODE_SLOTS || idx == nibble { + return Err(Error::InvalidSiblingSlot); + } + let s = INTERNAL_NODE_HEADER + idx * INTERNAL_NODE_SLOT_SIZE; + if is_zero_slot(&last[s..s + INTERNAL_NODE_SLOT_SIZE]) { + return Err(Error::InvalidSiblingSlot); + } + + if proven_slots[idx] { + return Err(Error::DuplicateSiblingSlot); + } + proven_slots[idx] = true; + + if sib.proof_path.is_empty() { + return Err(Error::EmptySiblingPath); + } + + let sib_key_hash = sha256(&sib.leftmost_leaf_key); + if nibble_at_depth(&sib_key_hash, depth) as usize != idx { + return Err(Error::SiblingNibbleMismatch); + } + + let mut combined: Vec = parent_nodes.to_vec(); + combined.extend_from_slice(&sib.proof_path); + + let is_valid_leaf = combined.last().map_or(false, |last| { + is_leaf(&last.proof_node) && + last.proof_node[1..33] == sha256(&sib.leftmost_leaf_key) + }); + + if !is_valid_leaf { + return Err(Error::SiblingProofInvalid(alloc::boxed::Box::new(Error::InvalidLeaf))); + } + + verify_proof_walk(&combined, &sib.leftmost_leaf_key, root) + .map_err(|e| Error::SiblingProofInvalid(alloc::boxed::Box::new(e)))?; + } + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn make_leaf(key: &[u8], value: &[u8]) -> Vec { + let mut leaf = vec![LEAF_NODE_TYPE]; + leaf.extend_from_slice(&sha256(key)); + leaf.extend_from_slice(&sha256(value)); + leaf + } + + fn make_internal_with_child(slot: usize, child_hash: &[u8; 32]) -> Vec { + let mut node = vec![0u8; INTERNAL_NODE_LEN]; + let start = INTERNAL_NODE_HEADER + slot * 
INTERNAL_NODE_SLOT_SIZE; + node[start..start + 32].copy_from_slice(child_hash); + node + } + + fn make_msu_root_with_child(slot: usize, child_hash: &[u8; 32]) -> Vec { + let mut node = vec![0u8; MSU_ROOT_NODE_LEN]; + let start = slot * INTERNAL_NODE_SLOT_SIZE; + node[start..start + 32].copy_from_slice(child_hash); + node + } + + fn node(data: impl Into>, begin: u32, end: u32) -> PharosProofNode { + let data = data.into(); + PharosProofNode { proof_node: data, next_begin_offset: begin, next_end_offset: end } + } + + #[test] + fn test_hash_internal_node_skip_empty() { + // All-zero node hashes to zero + let empty = vec![0u8; INTERNAL_NODE_LEN]; + assert_eq!(hash_internal_node(&empty), ZERO_HASH); + + // Node with one child: hash = sha256(header || child_hash) + let child_hash = sha256(b"test"); + let node = make_internal_with_child(5, &child_hash); + let mut expected_input = vec![0u8; INTERNAL_NODE_HEADER]; + expected_input.extend_from_slice(&child_hash); + assert_eq!(hash_internal_node(&node), sha256(&expected_input)); + + // SkipEmpty: moving a hash to a different slot produces a DIFFERENT result + // only if we track position (we don't — but the sibling proofs catch this) + let node_moved = make_internal_with_child(10, &child_hash); + // Both nodes have the same SkipEmpty hash since it's just sha256(header || child_hash) + assert_eq!(hash_internal_node(&node), hash_internal_node(&node_moved)); + // This proves why sibling proofs are necessary for non-existence! + } + + /// Build a 3-node proof (MSU root → internal → leaf) that follows the + /// key's nibble path. The internal node slot is derived from the key. 
+ fn build_proof_for_key(key: &[u8], value: &[u8]) -> (Vec, [u8; 32]) { + let leaf_data = make_leaf(key, value); + let leaf_hash = sha256(&leaf_data); + + // Internal node slot must match key's nibble at depth 0 + let key_hash = sha256(key); + let nibble = nibble_at_depth(&key_hash, 0) as usize; + let internal = make_internal_with_child(nibble, &leaf_hash); + let internal_hash = hash_internal_node(&internal); + + // MSU root — slot is arbitrary (uses next_begin_offset) + let msu_root = make_msu_root_with_child(7, &internal_hash); + let root = sha256(&msu_root); + let msu_offset = (7 * INTERNAL_NODE_SLOT_SIZE) as u32; + + let proof = vec![ + node(msu_root, msu_offset, msu_offset + 32), + node(internal, 0, 0), // offsets unused for i > 0 + node(leaf_data, 0, 0), + ]; + (proof, root) + } + + #[test] + fn test_existence_proof_valid() { + let key = b"test_key"; + let value = b"test_value"; + let (proof, root) = build_proof_for_key(key, value); + assert!(verify_proof(&proof, key, value, &root).is_ok()); + } + + #[test] + fn test_existence_proof_wrong_value_rejected() { + let key = b"test_key"; + let value = b"test_value"; + let (proof, root) = build_proof_for_key(key, value); + + assert!(verify_proof(&proof, key, b"wrong_value", &root).is_err()); + assert!(verify_proof(&proof, b"wrong_key", value, &root).is_err()); + } + + #[test] + fn test_membership_proof_returns_value_hash() { + let key = b"test_key"; + let value = b"test_value"; + let (proof, root) = build_proof_for_key(key, value); + + let result = verify_membership_proof(&proof, key, &root); + assert_eq!(result.unwrap(), sha256(value)); + + // Wrong key returns error + assert!(verify_membership_proof(&proof, b"wrong", &root).is_err()); + } + + #[test] + fn test_non_existence_case1_leaf_mismatch() { + // Proof ends at a leaf with a different key. 
For Case 1 to be valid, + // the query key and the leaf key must share the same nibble path through + // the trie (they collide at the leaf level but have different key_hashes). + let other_key = b"other_key"; + let other_value = b"other_value"; + let query_key = b"missing_key"; + + let leaf_data = make_leaf(other_key, other_value); + let leaf_hash = sha256(&leaf_data); + + // Place the leaf at the slot matching the QUERY key's nibble at depth 0. + let query_key_hash = sha256(query_key); + let query_nibble = nibble_at_depth(&query_key_hash, 0) as usize; + + let internal = make_internal_with_child(query_nibble, &leaf_hash); + let internal_hash = hash_internal_node(&internal); + + let msu_root = make_msu_root_with_child(7, &internal_hash); + let root = sha256(&msu_root); + let msu_offset = (7 * INTERNAL_NODE_SLOT_SIZE) as u32; + + let proof = vec![ + node(msu_root, msu_offset, msu_offset + 32), + node(internal, 0, 0), // offsets unused by key-aware walk for i > 0 + node(leaf_data, 0, 0), + ]; + + // Non-existence for query key succeeds (leaf has different key_hash) + assert!(verify_non_existence_proof(&proof, query_key, &root, &[]).is_ok()); + + // Non-existence for the actual key fails (it exists!) + assert!(verify_non_existence_proof(&proof, other_key, &root, &[]).is_err()); + } + + #[test] + fn test_non_existence_case1_wrong_path_rejected() { + // A leaf exists in the trie, but the proof path does NOT follow the + // query key's nibbles at the internal node level. This should be rejected. + let other_key = b"other_key"; + let other_value = b"other_value"; + let query_key = b"missing_key"; + + let leaf_data = make_leaf(other_key, other_value); + let leaf_hash = sha256(&leaf_data); + + // Place the leaf at a DIFFERENT internal-node slot than the query key's nibble. 
+ let query_key_hash = sha256(query_key); + let query_nibble = nibble_at_depth(&query_key_hash, 0) as usize; + let wrong_slot = (query_nibble + 1) % INTERNAL_NODE_SLOTS; + + let internal = make_internal_with_child(wrong_slot, &leaf_hash); + let internal_hash = hash_internal_node(&internal); + let wrong_offset = (INTERNAL_NODE_HEADER + wrong_slot * INTERNAL_NODE_SLOT_SIZE) as u32; + + let msu_root = make_msu_root_with_child(7, &internal_hash); + let root = sha256(&msu_root); + let msu_offset = (7 * INTERNAL_NODE_SLOT_SIZE) as u32; + + let proof = vec![ + node(msu_root, msu_offset, msu_offset + 32), + node(internal, wrong_offset, wrong_offset + 32), + node(leaf_data, 0, 0), + ]; + + // Rejected: the internal node has the leaf at the wrong nibble slot + assert!(verify_non_existence_proof(&proof, query_key, &root, &[]).is_err()); + } + + #[test] + fn test_non_existence_case2_empty_slot_all_zero_terminal() { + // Terminal node is all zeros, no sibling proofs needed + let empty_internal = vec![0u8; INTERNAL_NODE_LEN]; + let empty_hash = ZERO_HASH; // all-zero node hashes to zero + + let parent = make_internal_with_child(5, &empty_hash); + let parent_hash = hash_internal_node(&parent); + let parent_offset = (INTERNAL_NODE_HEADER + 5 * INTERNAL_NODE_SLOT_SIZE) as u32; + + let msu_root = make_msu_root_with_child(2, &parent_hash); + let root = sha256(&msu_root); + let msu_offset = (2 * INTERNAL_NODE_SLOT_SIZE) as u32; + + let proof = vec![ + node(msu_root, msu_offset, msu_offset + 32), + node(parent, parent_offset, parent_offset + 32), + node(empty_internal, 0, 0), + ]; + + assert!(verify_non_existence_proof(&proof, b"any_key", &root, &[]).is_ok()); + } + + #[test] + fn test_non_existence_missing_sibling_rejected() { + // Terminal node has a non-empty slot but no sibling proof provided + let child_hash = sha256(b"some_child"); + let terminal = make_internal_with_child(5, &child_hash); + let terminal_hash = hash_internal_node(&terminal); + + let parent_offset = 
(INTERNAL_NODE_HEADER + 3 * INTERNAL_NODE_SLOT_SIZE) as u32; + let parent = make_internal_with_child(3, &terminal_hash); + let parent_hash = hash_internal_node(&parent); + + let msu_root = make_msu_root_with_child(0, &parent_hash); + let root = sha256(&msu_root); + + let proof = vec![ + node(msu_root, 0, 32), + node(parent, parent_offset, parent_offset + 32), + node(terminal, 0, 0), + ]; + + // Terminal has 1 non-empty slot (slot 5) but 0 sibling proofs + // This must fail, attacker could have moved a hash via SkipEmpty + assert!(verify_non_existence_proof(&proof, b"any_key", &root, &[]).is_err()); + } + + #[test] + fn test_skip_empty_hash_slot_position_invariance() { + // Demonstrates the SkipEmpty attack vector: moving a hash between slots + // produces the same node hash, which is why sibling proofs are required + let child_hash = sha256(b"data"); + let node_a = make_internal_with_child(3, &child_hash); + let node_b = make_internal_with_child(11, &child_hash); + + // Same hash despite different slot positions + assert_eq!(hash_internal_node(&node_a), hash_internal_node(&node_b)); + } + + #[test] + fn test_empty_proof_rejected() { + assert!(verify_proof(&[], b"key", b"value", &[0; 32]).is_err()); + assert!(verify_membership_proof(&[], b"key", &[0; 32]).is_err()); + assert!(verify_non_existence_proof(&[], b"key", &[0; 32], &[]).is_err()); + } + + #[test] + fn test_nibble_at_depth() { + let hash = [0xAB, 0xCD, 0xEF, 0x12]; // + more bytes + let mut full_hash = [0u8; 32]; + full_hash[..4].copy_from_slice(&hash); + + // depth 0: low nibble of byte 0 = 0xB + assert_eq!(nibble_at_depth(&full_hash, 0), 0x0B); + // depth 1: high nibble of byte 0 = 0xA + assert_eq!(nibble_at_depth(&full_hash, 1), 0x0A); + // depth 2: low nibble of byte 1 = 0xD + assert_eq!(nibble_at_depth(&full_hash, 2), 0x0D); + // depth 3: high nibble of byte 1 = 0xC + assert_eq!(nibble_at_depth(&full_hash, 3), 0x0C); + } +} diff --git a/modules/consensus/pharos/primitives/src/types.rs 
b/modules/consensus/pharos/primitives/src/types.rs new file mode 100644 index 000000000..2035820dc --- /dev/null +++ b/modules/consensus/pharos/primitives/src/types.rs @@ -0,0 +1,270 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Type definitions for Pharos consensus. + +use crate::constants::BlsPublicKey; +use alloc::{collections::BTreeMap, vec::Vec}; +use codec::{Decode, Encode}; +use core::cmp::Ordering; +use geth_primitives::CodecHeader; +use primitive_types::{H256, U256}; + +/// Unique identifier for a validator pool in the staking contract +pub type PoolId = H256; + +/// Information about a single validator. +/// +/// Each validator has a BLS public key for signing blocks and a stake amount +/// that determines their voting power in consensus. +/// +/// Validators are keyed by their BLS public key in the ValidatorSet BTreeMap. 
#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)]
#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
pub struct ValidatorInfo {
	/// The validator's BLS public key (48 bytes compressed)
	pub bls_public_key: BlsPublicKey,
	/// The validator's pool ID in the staking contract
	pub pool_id: PoolId,
	/// The stake amount
	pub stake: U256,
}

impl PartialOrd for ValidatorInfo {
	fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
		Some(self.cmp(other))
	}
}

impl Ord for ValidatorInfo {
	/// Order primarily by BLS public key, falling back to the remaining
	/// fields. The fallback keeps `cmp(a, b) == Equal` consistent with the
	/// derived `Eq` (an `Ord` contract requirement): comparing only
	/// `bls_public_key` would report two infos with different `pool_id` or
	/// `stake` as "equal" while `==` says they differ.
	fn cmp(&self, other: &Self) -> Ordering {
		self.bls_public_key
			.cmp(&other.bls_public_key)
			.then_with(|| self.pool_id.cmp(&other.pool_id))
			.then_with(|| self.stake.cmp(&other.stake))
	}
}

/// The complete validator set for a given epoch.
///
/// This represents the set of validators that are eligible to sign blocks
/// during a specific epoch. The validator set is updated at epoch boundaries
/// (last block of an epoch).
///
/// Uses `BTreeMap` keyed by BLS public key for O(log n) lookups.
#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)]
#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
pub struct ValidatorSet {
	/// Validators keyed by BLS public key
	pub validators: BTreeMap<BlsPublicKey, ValidatorInfo>,
	/// Total stake across all validators
	pub total_stake: U256,
	/// The epoch this validator set is valid for
	pub epoch: u64,
}

impl ValidatorSet {
	/// Create a new empty validator set
	pub fn new(epoch: u64) -> Self {
		Self { validators: BTreeMap::new(), total_stake: U256::zero(), epoch }
	}

	/// Add a validator to the set.
	/// Returns true if the validator was added, false if it was a duplicate.
+ pub fn add_validator(&mut self, validator: ValidatorInfo) -> bool { + let key = validator.bls_public_key.clone(); + let stake = validator.stake; + if self.validators.contains_key(&key) { + false + } else { + self.validators.insert(key, validator); + self.total_stake = self.total_stake.saturating_add(stake); + true + } + } + + /// Check if a validator is in the set by their BLS public key + pub fn contains(&self, bls_key: &BlsPublicKey) -> bool { + self.validators.contains_key(bls_key) + } + + /// Get a validator by their BLS public key + pub fn get_validator(&self, bls_key: &BlsPublicKey) -> Option<&ValidatorInfo> { + self.validators.get(bls_key) + } + + /// Calculate the stake of participating validators + pub fn participating_stake(&self, participants: &[BlsPublicKey]) -> U256 { + participants + .iter() + .filter_map(|key| self.get_validator(key)) + .fold(U256::zero(), |acc, v| acc.saturating_add(v.stake)) + } + + /// Check if participating stake meets the 2/3 + 1 threshold + pub fn has_supermajority(&self, participants: &[BlsPublicKey]) -> bool { + let participating = self.participating_stake(participants); + let required = (self.total_stake * 2 / 3) + 1; + participating >= required + } + + /// Get the number of validators in the set + pub fn len(&self) -> usize { + self.validators.len() + } + + /// Check if the validator set is empty + pub fn is_empty(&self) -> bool { + self.validators.is_empty() + } +} + +/// Block proof containing the BLS signature data. +/// +/// This contains the aggregated BLS signature for a block and the list +/// of participating validators who signed it. 
+#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub struct BlockProof { + /// The aggregated BLS signature from participating validators (96 bytes) + pub aggregate_signature: Vec, + /// List of BLS public keys of validators who participated in signing + pub participant_keys: Vec, + /// The block proof hash from the RPC, the message validators actually signed + pub block_proof_hash: H256, +} + +impl BlockProof { + pub fn participant_count(&self) -> usize { + self.participant_keys.len() + } +} + +/// Single node in a Pharos hexary hash tree proof path. +/// +/// Each proof node contains the raw node bytes and offsets indicating where +/// the child hash appears within this node (used for bottom-up verification). +#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub struct PharosProofNode { + /// Raw bytes of this proof node + pub proof_node: Vec, + /// Start offset within this node where the next (child) hash begins + pub next_begin_offset: u32, + /// End offset within this node where the next (child) hash ends + pub next_end_offset: u32, +} + +/// Sibling proof for non-existence verification — proves a non-empty sibling slot +/// is genuine by providing a path to its leftmost leaf. +#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub struct SiblingLeftmostLeafProof { + pub slot_index: u8, + pub leftmost_leaf_key: Vec, + pub proof_path: Vec, +} + +/// Non-existence proof: either a leaf key mismatch (empty sibling_proofs) or +/// an empty slot in an internal node (sibling_proofs pin non-empty slots to root). 
+#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub struct NonExistenceProof { + pub proof_nodes: Vec, + pub sibling_proofs: Vec, +} + +/// State proof for validator set stored in the staking contract. +/// +/// This proof is required when the validator set changes at epoch boundaries. +/// The validator set is decoded directly from the proof. +/// +/// Uses Pharos hexary hash tree proofs (SHA-256) instead of Ethereum MPT (Keccak-256). +#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +pub struct ValidatorSetProof { + /// Per-key storage proof nodes (storage key -> proof path, verified against state_root) + pub storage_proof: BTreeMap>, + /// Raw storage values in order: [totalStake, activePoolSets length, + /// pool_id_0..pool_id_n, validator_0_bls_header, validator_0_bls_data_0..2, + /// validator_0_stake, ...] + pub storage_values: Vec>, +} + +/// The trusted state maintained by the Pharos consensus client. +/// +/// This state is updated as new blocks are verified and represents +/// the current view of the chain from the light client's perspective. 
+#[derive(Debug, Clone, Encode, Decode)] +pub struct VerifierState { + /// The current (active) validator set + pub current_validator_set: ValidatorSet, + /// The latest finalized block number + pub finalized_block_number: u64, + /// The hash of the finalized header + pub finalized_hash: H256, + /// The current epoch number + pub current_epoch: u64, +} + +impl VerifierState { + /// Create a new verifier state with initial trusted state + pub fn new( + initial_validator_set: ValidatorSet, + initial_block_number: u64, + initial_hash: H256, + ) -> Self { + let epoch = initial_validator_set.epoch; + Self { + current_validator_set: initial_validator_set, + finalized_block_number: initial_block_number, + finalized_hash: initial_hash, + current_epoch: epoch, + } + } +} + +/// Data required to update the verifier state. +/// +/// This is what the prover submits to advance the light client's state. +#[derive(Debug, Clone, Encode, Decode)] +pub struct VerifierStateUpdate { + /// The header being attested to + pub header: CodecHeader, + /// Block proof from debug_getBlockProof containing the BLS signature + pub block_proof: BlockProof, + /// Optional validator set update proof (required at epoch boundaries) + pub validator_set_proof: Option, +} + +impl VerifierStateUpdate { + /// Get the block number from the header + pub fn block_number(&self) -> u64 { + self.header.number.low_u64() + } + + /// Check if this update includes a validator set rotation + pub fn has_validator_set_update(&self) -> bool { + self.validator_set_proof.is_some() + } +} + +/// Result of successful verification +#[derive(Debug, Clone, Encode, Decode)] +pub struct VerificationResult { + /// The verified block hash + pub block_hash: H256, + /// The verified header + pub header: CodecHeader, + /// The new validator set if this was an epoch boundary block + pub new_validator_set: Option, +} diff --git a/modules/consensus/pharos/prover/Cargo.toml b/modules/consensus/pharos/prover/Cargo.toml new file mode 
100644 index 000000000..16aa3431b --- /dev/null +++ b/modules/consensus/pharos/prover/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "pharos-prover" +version = "0.1.0" +edition = "2021" +description = "Pharos consensus prover" +authors = ["Polytope Labs "] +publish = false + +[dependencies] +pharos-primitives = { path = "../primitives" } +pharos-verifier = { path = "../verifier" } +geth-primitives = { workspace = true } +primitive-types = { workspace = true } +thiserror = { workspace = true } +reqwest = { workspace = true, features = ["json", "rustls-tls"] } +serde = { workspace = true, features = ["derive"] } +serde_json = { package = "serde_json", version = "1.0.99" } +sp-core = { workspace = true } +hex = { workspace = true } +ethabi = { workspace = true } +alloy-provider = { workspace = true } +alloy-eips = { workspace = true } diff --git a/modules/consensus/pharos/prover/src/error.rs b/modules/consensus/pharos/prover/src/error.rs new file mode 100644 index 000000000..b8725bc40 --- /dev/null +++ b/modules/consensus/pharos/prover/src/error.rs @@ -0,0 +1,96 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use thiserror::Error; + +/// Errors that can occur during proof generation. 
+#[derive(Error, Debug)] +pub enum ProverError { + /// HTTP request failed + #[error("HTTP request failed: {0}")] + HttpRequest(#[from] reqwest::Error), + + /// JSON deserialization failed + #[error("JSON deserialization failed")] + JsonDeserialization, + + /// JSON-RPC returned an error + #[error("RPC error (code {code}): {message}")] + RpcError { code: i64, message: String }, + + /// RPC response missing result field + #[error("RPC response missing result")] + MissingRpcResult, + + /// Block not found at the specified height + #[error("Block not found: {0}")] + BlockNotFound(u64), + + /// Block proof not available (debug_getBlockProof may be disabled) + #[error("Block proof not available for block {0}")] + BlockProofNotAvailable(u64), + + /// Hex decoding failed + #[error("Invalid hex encoding")] + HexDecode, + + /// Invalid number format + #[error("Invalid number format")] + InvalidNumber, + + /// Invalid address length + #[error("Invalid address length: expected 20, got {0}")] + InvalidAddressLength(usize), + + /// Invalid H256 length + #[error("Invalid H256 length: expected 32, got {0}")] + InvalidH256Length(usize), + + /// Invalid logs bloom length + #[error("Invalid logs bloom length: expected 256, got {0}")] + InvalidLogsBloomLength(usize), + + /// Invalid BLS public key length + #[error("Invalid BLS public key length: expected 48, got {0}")] + InvalidBlsKeyLength(usize), + + /// Invalid BLS signature length + #[error("Invalid BLS signature length: expected 96, got {0}")] + InvalidBlsSignatureLength(usize), + + /// Validator set proof required but not available + #[error("Validator set proof required but not available")] + ValidatorSetProofRequired, + + /// Storage proof verification failed + #[error("Storage proof verification failed")] + StorageProofVerification, + + /// Missing storage value + #[error("Missing storage value at slot index {0}")] + MissingStorageValue(usize), + + /// Missing storage proof in eth_getProof response + #[error("Missing storage 
proof for {0}")] + MissingStorageProof(&'static str), + + /// Invalid RPC URL + #[error("Invalid RPC URL: {0}")] + InvalidUrl(String), + + /// Provider transport error + #[error("Provider error: {0}")] + ProviderError(String), +} diff --git a/modules/consensus/pharos/prover/src/lib.rs b/modules/consensus/pharos/prover/src/lib.rs new file mode 100644 index 000000000..56e155c50 --- /dev/null +++ b/modules/consensus/pharos/prover/src/lib.rs @@ -0,0 +1,439 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Pharos consensus prover for light client. + +pub mod error; +pub mod rpc; + +pub use error::ProverError; + +use pharos_primitives::{ + BlockProof, BlsPublicKey, Config, PharosProofNode, SiblingLeftmostLeafProof, ValidatorInfo, + ValidatorSet, ValidatorSetProof, VerifierStateUpdate, STAKING_CONTRACT_ADDRESS, +}; +use pharos_verifier::state_proof::StakingContractLayout; +use primitive_types::{H160, H256, U256}; +use rpc::{ + hex_to_bytes, hex_to_u64, PharosRpcClient, RpcBlockProof, RpcProofNode, RpcSiblingProof, + RpcValidatorInfo, +}; +use std::{collections::BTreeMap, marker::PhantomData, sync::Arc}; + +/// Pharos prover for constructing light client updates. +#[derive(Clone)] +pub struct PharosProver { + pub rpc: Arc, + storage_layout: StakingContractLayout, + /// Epoch length in blocks, read from staking contract slot 5 at init. 
+ /// Falls back to `C::EPOCH_LENGTH_BLOCKS` if the RPC call fails. + pub epoch_length: u64, + _config: PhantomData, +} + +impl PharosProver { + /// Create a new prover with the given RPC endpoint. + /// Reads `epochLength` from the staking contract at the latest block. + pub async fn new(endpoint: impl Into) -> Result { + let rpc = Arc::new(PharosRpcClient::new(endpoint)?); + let epoch_length = Self::fetch_epoch_length(&rpc).await; + Ok(Self { + rpc, + storage_layout: StakingContractLayout::default(), + epoch_length, + _config: PhantomData, + }) + } + + /// Create a new prover with a custom storage layout. + pub async fn with_storage_layout( + endpoint: impl Into, + layout: StakingContractLayout, + ) -> Result { + let rpc = Arc::new(PharosRpcClient::new(endpoint)?); + let epoch_length = Self::fetch_epoch_length(&rpc).await; + Ok(Self { rpc, storage_layout: layout, epoch_length, _config: PhantomData }) + } + + async fn fetch_epoch_length(rpc: &PharosRpcClient) -> u64 { + let address = H160::from_slice(STAKING_CONTRACT_ADDRESS.as_slice()); + let latest = rpc.get_block_number().await.unwrap_or(0); + if latest == 0 { + return C::EPOCH_LENGTH_BLOCKS; + } + // Slot 5 = epochLength + match rpc.get_storage_at(address, U256::from(5), latest).await { + Ok(val) if !val.is_zero() => val.low_u64(), + _ => C::EPOCH_LENGTH_BLOCKS, + } + } + + /// Fetch the latest block number from the node. + pub async fn get_latest_block(&self) -> Result { + self.rpc.get_block_number().await + } + + /// Fetch a block update for the given block number. + /// + /// This will: + /// 1. Fetch the block header + /// 2. Fetch the block proof + /// 3. 
If at epoch boundary, fetch validator set proof + pub async fn fetch_block_update( + &self, + block_number: u64, + ) -> Result { + let header = self.rpc.get_block_by_number(block_number).await?; + + let rpc_proof = self.rpc.get_block_proof(block_number).await?; + let block_proof = self.convert_rpc_block_proof(&rpc_proof)?; + + let validator_set_proof = if C::is_epoch_boundary(block_number) { + Some(self.fetch_validator_set_proof(block_number).await?) + } else { + None + }; + + Ok(VerifierStateUpdate { header, block_proof, validator_set_proof }) + } + + /// Fetch only the block proof for a given block number. + pub async fn fetch_block_proof(&self, block_number: u64) -> Result { + let rpc_proof = self.rpc.get_block_proof(block_number).await?; + self.convert_rpc_block_proof(&rpc_proof) + } + + /// Build a ValidatorSet from RPC validator info. + pub fn build_validator_set( + &self, + validators: &[RpcValidatorInfo], + epoch: u64, + ) -> Result { + let mut validator_set = ValidatorSet::new(epoch); + + for v in validators { + let bls_key_bytes = hex_to_bytes(&v.bls_key)?; + let len = bls_key_bytes.len(); + let bls_public_key: BlsPublicKey = + bls_key_bytes.try_into().map_err(|_| ProverError::InvalidBlsKeyLength(len))?; + + let pool_id_bytes = hex_to_bytes(&v.validator_id)?; + let pool_id = if pool_id_bytes.len() == 32 { + H256::from_slice(&pool_id_bytes) + } else { + let mut padded = [0u8; 32]; + let start = 32usize.saturating_sub(pool_id_bytes.len()); + padded[start..].copy_from_slice(&pool_id_bytes); + H256::from(padded) + }; + + let stake = Self::parse_stake(&v.staking)?; + + let info = ValidatorInfo { bls_public_key, pool_id, stake }; + validator_set.add_validator(info); + } + + Ok(validator_set) + } + + /// Parse a hex stake value to U256. + fn parse_stake(hex: &str) -> Result { + let hex = hex.trim_start_matches("0x"); + U256::from_str_radix(hex, 16).map_err(|_| ProverError::InvalidNumber) + } + + /// Fetch validator set proof for an epoch boundary block. 
+ /// + /// This fetches the storage proof from the staking contract at the + /// given block, which contains the validator set for the next epoch. + /// + /// The storage layout follows the Pharos staking contract (V2): + /// - Slot 6: totalStake + /// - Slot 21: activePoolSets (EnumerableSet._inner._values array length) + /// - keccak256(21): array elements (pool IDs) + /// - For each pool ID: validator data via mapping at slot 0 + pub async fn fetch_validator_set_proof( + &self, + block_number: u64, + ) -> Result { + let address = H160::from_slice(STAKING_CONTRACT_ADDRESS.as_slice()); + + // Fetch base slots (totalStake, activePoolSets length) + let base_keys = vec![ + self.storage_layout.raw_slot_key(self.storage_layout.total_stake_slot), + self.storage_layout.raw_slot_key(self.storage_layout.active_pool_set_slot), + ]; + + let base_proof = self.rpc.get_proof(address, base_keys.clone(), block_number).await?; + + // Get validator count from activePoolSets length (slot 21) + let validator_count = if base_proof.storage_proof.len() >= 2 { + hex_to_u64(&base_proof.storage_proof[1].value)? 
+ } else { + return Err(ProverError::MissingStorageProof("activePoolSets length")); + }; + + // Fetch pool IDs from the activePoolSets array + let mut pool_id_keys = Vec::new(); + for i in 0..validator_count { + pool_id_keys.push(self.storage_layout.array_element_key_with( + self.storage_layout.active_pool_set_slot, + i, + |data| H256::from(keccak256(data)), + )); + } + + if pool_id_keys.is_empty() { + return Err(ProverError::MissingStorageProof("activePoolSets array is empty")); + } + + let pool_id_proof = self.rpc.get_proof(address, pool_id_keys.clone(), block_number).await?; + + // Extract pool IDs + let mut pool_ids = Vec::new(); + for sp in &pool_id_proof.storage_proof { + let bytes = hex_to_bytes(&sp.value)?; + let mut padded = [0u8; 32]; + if bytes.len() <= 32 { + padded[32 - bytes.len()..].copy_from_slice(&bytes); + } + pool_ids.push(H256::from(padded)); + } + + // Collect storage values and per-key proof paths. + // Each storage key maps to its own proof path for individual verification. + let mut storage_proof: BTreeMap> = BTreeMap::new(); + let mut storage_values: Vec> = Vec::new(); + + for (i, sp) in base_proof.storage_proof.iter().enumerate() { + storage_proof.insert(base_keys[i], rpc_to_proof_nodes(&sp.proof)?); + storage_values.push(hex_to_bytes(&sp.value)?); + } + for (i, sp) in pool_id_proof.storage_proof.iter().enumerate() { + storage_proof.insert(pool_id_keys[i], rpc_to_proof_nodes(&sp.proof)?); + storage_values.push(hex_to_bytes(&sp.value)?); + } + + // Fetch validator data in two batched RPC calls (instead of 2 per validator): + // Phase 1: batch all validators' BLS string headers + stakes into one call. + // Phase 2: use phase 1 results to compute BLS data slot keys, batch into one call. + + // Build phase 1 keys: [bls_header_0, stake_0, bls_header_1, stake_1, ...] 
+ let mut phase1_all_keys = Vec::new(); + let mut validator_slots: Vec<(H256, H256)> = Vec::new(); // (bls_string_slot, stake_slot) + for pool_id in &pool_ids { + let (bls_string_slot, stake_slot) = self.get_validator_header_and_stake_keys(pool_id); + phase1_all_keys.push(bls_string_slot); + phase1_all_keys.push(stake_slot); + validator_slots.push((bls_string_slot, stake_slot)); + } + + let phase1_proof = self.rpc.get_proof(address, phase1_all_keys, block_number).await?; + + if phase1_proof.storage_proof.len() < pool_ids.len() * 2 { + return Err(ProverError::MissingStorageProof("BLS header or stake in phase 1")); + } + + // Process phase 1 results and build phase 2 keys + let mut phase2_all_keys = Vec::new(); + // Track per-validator: (bls_string_slot, stake_slot, phase1_header_idx, phase1_stake_idx, + // data_keys) + struct ValidatorPhaseInfo { + bls_string_slot: H256, + stake_slot: H256, + phase1_header_idx: usize, + phase1_stake_idx: usize, + data_keys: Vec, + } + let mut validator_info = Vec::new(); + + for (v_idx, (bls_string_slot, stake_slot)) in validator_slots.iter().enumerate() { + let header_idx = v_idx * 2; + let stake_idx = v_idx * 2 + 1; + + let header_hex = &phase1_proof.storage_proof[header_idx].value; + let data_slot_count = bls_data_slots_from_hex(header_hex)?; + + let bls_data_base = H256::from(keccak256(bls_string_slot.as_bytes())); + let bls_data_base_pos = U256::from_big_endian(bls_data_base.as_bytes()); + let mut data_keys = Vec::new(); + for i in 0..data_slot_count { + let key = H256((bls_data_base_pos + U256::from(i)).to_big_endian()); + phase2_all_keys.push(key); + data_keys.push(key); + } + + validator_info.push(ValidatorPhaseInfo { + bls_string_slot: *bls_string_slot, + stake_slot: *stake_slot, + phase1_header_idx: header_idx, + phase1_stake_idx: stake_idx, + data_keys, + }); + } + + // Phase 2: fetch all BLS data slots in one call + let phase2_proof = if !phase2_all_keys.is_empty() { + Some(self.rpc.get_proof(address, phase2_all_keys, 
block_number).await?) + } else { + None + }; + + // Assemble results per validator in order: [header, data_0..N, stake] + let mut phase2_offset = 0; + for vi in &validator_info { + // Header + storage_proof.insert( + vi.bls_string_slot, + rpc_to_proof_nodes(&phase1_proof.storage_proof[vi.phase1_header_idx].proof)?, + ); + storage_values + .push(hex_to_bytes(&phase1_proof.storage_proof[vi.phase1_header_idx].value)?); + + // Data slots + if let Some(ref p2) = phase2_proof { + for (j, key) in vi.data_keys.iter().enumerate() { + storage_proof.insert( + *key, + rpc_to_proof_nodes(&p2.storage_proof[phase2_offset + j].proof)?, + ); + storage_values.push(hex_to_bytes(&p2.storage_proof[phase2_offset + j].value)?); + } + } + phase2_offset += vi.data_keys.len(); + + // Stake + storage_proof.insert( + vi.stake_slot, + rpc_to_proof_nodes(&phase1_proof.storage_proof[vi.phase1_stake_idx].proof)?, + ); + storage_values + .push(hex_to_bytes(&phase1_proof.storage_proof[vi.phase1_stake_idx].value)?); + } + + Ok(ValidatorSetProof { storage_proof, storage_values }) + } + + /// Get the BLS string header slot and stake slot for a validator. + /// + /// These are fetched first (phase 1) to determine the dynamic BLS data slot count + /// from the string header value before fetching the data slots (phase 2). 
+ fn get_validator_header_and_stake_keys(&self, pool_id: &H256) -> (H256, H256) { + const BLS_PUBLIC_KEY_OFFSET: u64 = 3; + const TOTAL_STAKE_OFFSET: u64 = 8; + + // Calculate validator base slot: keccak256(pool_id || mapping_slot) + let mut data = [0u8; 64]; + data[..32].copy_from_slice(pool_id.as_bytes()); + data[32..64].copy_from_slice( + &U256::from(self.storage_layout.validators_mapping_slot).to_big_endian(), + ); + let base_slot = H256::from(keccak256(&data)); + let base_pos = U256::from_big_endian(base_slot.as_bytes()); + + let bls_string_slot = H256((base_pos + U256::from(BLS_PUBLIC_KEY_OFFSET)).to_big_endian()); + let stake_slot = H256((base_pos + U256::from(TOTAL_STAKE_OFFSET)).to_big_endian()); + + (bls_string_slot, stake_slot) + } + + /// Convert RPC block proof to BlockProof. + fn convert_rpc_block_proof( + &self, + rpc_proof: &RpcBlockProof, + ) -> Result { + let aggregate_signature = hex_to_bytes(&rpc_proof.bls_aggregated_signature)?; + + let mut participant_keys: Vec = rpc_proof + .signed_bls_keys + .iter() + .map(|k| { + let bytes = hex_to_bytes(k)?; + let len = bytes.len(); + bytes.try_into().map_err(|_| ProverError::InvalidBlsKeyLength(len)) + }) + .collect::, _>>()?; + participant_keys.dedup(); + + let block_proof_hash = { + let bytes = hex_to_bytes(&rpc_proof.block_proof_hash)?; + if bytes.len() != 32 { + return Err(ProverError::InvalidH256Length(bytes.len())); + } + H256::from_slice(&bytes) + }; + + Ok(BlockProof { aggregate_signature, participant_keys, block_proof_hash }) + } +} + +/// Convert RPC proof nodes to PharosProofNode format. 
+pub fn rpc_to_proof_nodes(nodes: &[RpcProofNode]) -> Result, ProverError> { + nodes + .iter() + .map(|n| { + Ok(PharosProofNode { + proof_node: hex_to_bytes(&n.proof_node)?, + next_begin_offset: n.next_begin_offset, + next_end_offset: n.next_end_offset, + }) + }) + .collect() +} + +pub fn rpc_to_sibling_proofs( + siblings: &[RpcSiblingProof], +) -> Result, ProverError> { + siblings + .iter() + .map(|s| { + Ok(SiblingLeftmostLeafProof { + slot_index: s.slot_index, + leftmost_leaf_key: hex_to_bytes(&s.leftmost_leaf_key)?, + proof_path: rpc_to_proof_nodes(&s.proof_path)?, + }) + }) + .collect() +} + +/// Determine the number of BLS data slots from a hex-encoded string header value. +/// +/// For Solidity long strings, the header slot contains `length * 2 + 1`. +/// The byte length is `(value - 1) / 2`, and the slot count is `ceil(length / 32)`. +fn bls_data_slots_from_hex(hex_value: &str) -> Result { + let bytes = hex_to_bytes(hex_value)?; + let mut padded = [0u8; 32]; + if bytes.len() <= 32 { + padded[32 - bytes.len()..].copy_from_slice(&bytes); + } + let val = U256::from_big_endian(&padded); + let val_bytes = val.to_big_endian(); + let lowest_byte = val_bytes[31]; + + if lowest_byte & 1 == 0 { + // Short string - data is in the header itself + Ok(0) + } else { + // Long string - header = length * 2 + 1 + let length = (val - U256::from(1)) / U256::from(2); + let str_len = length.low_u64(); + Ok((str_len + 31) / 32) + } +} + +/// Keccak256 hash using sp_core. +fn keccak256(data: &[u8]) -> [u8; 32] { + sp_core::keccak_256(data) +} diff --git a/modules/consensus/pharos/prover/src/rpc.rs b/modules/consensus/pharos/prover/src/rpc.rs new file mode 100644 index 000000000..5e35715d9 --- /dev/null +++ b/modules/consensus/pharos/prover/src/rpc.rs @@ -0,0 +1,329 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! RPC client for Pharos node interactions. +//! +//! Standard Ethereum JSON-RPC calls (`eth_blockNumber`, `eth_getBlockByNumber`) +//! are handled via the alloy provider. Pharos-specific endpoints that use +//! non-standard response formats (`eth_getProof`, `debug_getBlockProof`, +//! `debug_getValidatorInfo`) are called through a raw reqwest client. + +use crate::ProverError; +use alloy_eips::BlockNumberOrTag; +use alloy_provider::{Provider, RootProvider}; +use ethabi::ethereum_types::H64; +use geth_primitives::CodecHeader; +use primitive_types::{H160, H256, U256}; +use serde::{Deserialize, Serialize}; +use std::sync::atomic::{AtomicU64, Ordering}; + +/// JSON-RPC request structure. +#[derive(Debug, Serialize)] +struct JsonRpcRequest
<P: Serialize>
{ + pub jsonrpc: &'static str, + pub method: &'static str, + pub params: P, + pub id: u64, +} + +impl
<P: Serialize>
JsonRpcRequest
<P>
{ + fn new(method: &'static str, params: P, id: u64) -> Self { + Self { jsonrpc: "2.0", method, params, id } + } +} + +/// JSON-RPC response structure. +#[derive(Debug, Deserialize)] +struct JsonRpcResponse { + #[allow(dead_code)] + pub jsonrpc: String, + pub result: Option, + pub error: Option, + #[allow(dead_code)] + pub id: u64, +} + +/// JSON-RPC error structure. +#[derive(Debug, Deserialize)] +struct JsonRpcError { + pub code: i64, + pub message: String, + #[allow(dead_code)] + pub data: Option, +} + +/// Block proof response from `debug_getBlockProof`. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcBlockProof { + /// Block number as hex string (e.g., "0x1234") + pub block_number: String, + /// Block proof hash - the message that was signed + pub block_proof_hash: String, + /// Aggregated BLS signature as hex string + pub bls_aggregated_signature: String, + /// List of BLS public keys that signed, as hex strings + pub signed_bls_keys: Vec, +} + +/// Single proof node in the Pharos hexary hash tree format. +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcProofNode { + /// Raw node bytes as hex string + pub proof_node: String, + /// Start offset where the child hash begins + pub next_begin_offset: u32, + /// End offset where the child hash ends + pub next_end_offset: u32, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcSiblingProof { + pub slot_index: u8, + pub leftmost_leaf_key: String, + pub proof_path: Vec, +} + +/// Account proof response from `eth_getProof`. +/// +/// Uses a custom response format (Pharos hexary hash tree nodes instead of +/// standard Ethereum MPT nodes), so this endpoint is called via raw JSON-RPC +/// rather than the alloy provider. 
+#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcAccountProof { + pub account_proof: Vec, + pub balance: String, + pub code_hash: String, + pub nonce: String, + pub storage_hash: String, + /// RLP-encoded account value (rawValue) + pub raw_value: String, + pub storage_proof: Vec, + pub is_exist: bool, + #[serde(default)] + pub sibling_leftmost_leaf_proofs: Vec, +} + +/// Storage proof entry from `eth_getProof`. +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcStorageProof { + pub key: String, + pub value: String, + pub proof: Vec, + pub is_exist: bool, + #[serde(default)] + pub sibling_leftmost_leaf_proofs: Vec, +} + +/// Validator info from `debug_getValidatorInfo`. +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcValidatorInfo { + pub bls_key: String, + pub identity_key: String, + pub staking: String, + #[serde(rename = "validatorID")] + pub validator_id: String, +} + +/// Response from `debug_getValidatorInfo`. +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcValidatorInfoResponse { + pub block_number: String, + pub validator_set: Vec, +} + +/// RPC client for Pharos node. +/// +/// Uses an alloy provider for standard Ethereum JSON-RPC queries and a raw +/// reqwest client for Pharos-specific debug endpoints and `eth_getProof` +/// (which returns non-standard proof node formats). +pub struct PharosRpcClient { + endpoint: String, + client: reqwest::Client, + provider: RootProvider, + request_id: AtomicU64, +} + +impl PharosRpcClient { + /// Create a new RPC client for the given endpoint URL. 
+ pub fn new(endpoint: impl Into) -> Result { + let endpoint = endpoint.into(); + let provider = RootProvider::new_http( + endpoint.parse().map_err(|_| ProverError::InvalidUrl(endpoint.clone()))?, + ); + Ok(Self { + endpoint, + client: reqwest::Client::new(), + provider, + request_id: AtomicU64::new(1), + }) + } + + fn next_id(&self) -> u64 { + self.request_id.fetch_add(1, Ordering::SeqCst) + } + + /// Make a raw JSON-RPC call for non-standard endpoints. + async fn call Deserialize<'de>>( + &self, + method: &'static str, + params: P, + ) -> Result { + let request = JsonRpcRequest::new(method, params, self.next_id()); + + let response = self.client.post(&self.endpoint).json(&request).send().await?; + + let rpc_response: JsonRpcResponse = + response.json().await.map_err(|_| ProverError::JsonDeserialization)?; + + if let Some(error) = rpc_response.error { + return Err(ProverError::RpcError { code: error.code, message: error.message }); + } + + rpc_response.result.ok_or(ProverError::MissingRpcResult) + } + + /// Fetch the latest block number. + pub async fn get_storage_at( + &self, + address: H160, + slot: U256, + block_number: u64, + ) -> Result { + let address_hex = format!("0x{:x}", address); + let slot_hex = format!("0x{:064x}", slot); + let block_hex = format!("0x{:x}", block_number); + let value: String = + self.call("eth_getStorageAt", (address_hex, slot_hex, block_hex)).await?; + let bytes = hex_to_bytes(&value)?; + let mut padded = [0u8; 32]; + if bytes.len() <= 32 { + padded[32 - bytes.len()..].copy_from_slice(&bytes); + } + Ok(U256::from_big_endian(&padded)) + } + + pub async fn get_block_number(&self) -> Result { + self.provider + .get_block_number() + .await + .map_err(|e| ProverError::ProviderError(e.to_string())) + } + + /// Fetch a block header by number, converting the response to [`CodecHeader`]. 
+ pub async fn get_block_by_number(&self, block_number: u64) -> Result { + let block = self + .provider + .get_block_by_number(BlockNumberOrTag::Number(block_number)) + .await + .map_err(|e| ProverError::ProviderError(e.to_string()))? + .ok_or(ProverError::BlockNotFound(block_number))?; + + let h = &block.header.inner; + + Ok(CodecHeader { + parent_hash: H256::from(h.parent_hash.0), + uncle_hash: H256::from(h.ommers_hash.0), + coinbase: H160::from(h.beneficiary.0 .0), + state_root: H256::from(h.state_root.0), + transactions_root: H256::from(h.transactions_root.0), + receipts_root: H256::from(h.receipts_root.0), + logs_bloom: { + let mut bloom = [0u8; 256]; + bloom.copy_from_slice(h.logs_bloom.as_ref()); + bloom.into() + }, + difficulty: U256::from_big_endian(&h.difficulty.to_be_bytes::<32>()), + number: U256::from(h.number), + gas_limit: h.gas_limit, + gas_used: h.gas_used, + timestamp: h.timestamp, + extra_data: h.extra_data.to_vec(), + mix_hash: H256::from(h.mix_hash.0), + nonce: H64::from(h.nonce.0), + base_fee_per_gas: h.base_fee_per_gas.map(U256::from), + withdrawals_hash: h.withdrawals_root.map(|v| H256::from(v.0)), + blob_gas_used: h.blob_gas_used, + excess_blob_gas_used: h.excess_blob_gas, + parent_beacon_root: h.parent_beacon_block_root.map(|v| H256::from(v.0)), + requests_hash: h.requests_hash.map(|v| H256::from(v.0)), + }) + } + + /// Fetch block proof using `debug_getBlockProof`. + pub async fn get_block_proof(&self, block_number: u64) -> Result { + let block_hex = format!("0x{:x}", block_number); + self.call("debug_getBlockProof", vec![block_hex]).await + } + + /// Fetch account and storage proofs using `eth_getProof`. + /// + /// This uses the raw JSON-RPC client because Pharos returns proof nodes + /// in its own hexary hash tree format rather than standard Ethereum MPT nodes. 
+ pub async fn get_proof( + &self, + address: H160, + storage_keys: Vec, + block_number: u64, + ) -> Result { + let address_hex = format!("0x{:x}", address); + let keys_hex: Vec = storage_keys.iter().map(|k| format!("0x{:x}", k)).collect(); + let block_hex = format!("0x{:x}", block_number); + + self.call("eth_getProof", (address_hex, keys_hex, block_hex)).await + } + + /// Fetch validator info using `debug_getValidatorInfo`. + pub async fn get_validator_info( + &self, + block_number: Option, + ) -> Result { + let block_param = match block_number { + Some(n) => format!("0x{:x}", n), + None => "latest".to_string(), + }; + self.call("debug_getValidatorInfo", vec![block_param]).await + } +} + +/// Parse a hex string to bytes. +pub fn hex_to_bytes(hex: &str) -> Result, ProverError> { + let hex = hex.trim_start_matches("0x"); + hex::decode(hex).map_err(|_| ProverError::HexDecode) +} + +/// Parse a hex string to H256. +pub fn hex_to_h256(hex: &str) -> Result { + let bytes = hex_to_bytes(hex)?; + if bytes.len() != 32 { + return Err(ProverError::InvalidH256Length(bytes.len())); + } + Ok(H256::from_slice(&bytes)) +} + +/// Parse a hex string to u64. 
+pub fn hex_to_u64(hex: &str) -> Result { + let hex = hex.trim_start_matches("0x"); + if hex.is_empty() { + return Ok(0); + } + u64::from_str_radix(hex, 16).map_err(|_| ProverError::InvalidNumber) +} diff --git a/modules/consensus/pharos/verifier/Cargo.toml b/modules/consensus/pharos/verifier/Cargo.toml new file mode 100644 index 000000000..82042bbf3 --- /dev/null +++ b/modules/consensus/pharos/verifier/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "pharos-verifier" +version = "0.1.0" +edition = "2021" +description = "Pharos consensus verifier for light client" +authors = ["Polytope Labs "] +publish = false + +[dependencies] +pharos-primitives = { workspace = true, default-features = false } +crypto-utils = { workspace = true, default-features = false } +geth-primitives = { workspace = true, default-features = false } +ismp = { workspace = true, default-features = false } +bls = { workspace = true } +log = { workspace = true, default-features = false } +anyhow = { workspace = true, default-features = false } +thiserror = { workspace = true } +primitive-types = { workspace = true } + +[features] +default = ["std"] +std = [ + "pharos-primitives/std", + "crypto-utils/std", + "geth-primitives/std", + "ismp/std", + "log/std", + "anyhow/std", + "bls/std", + "primitive-types/std", +] + +[dependencies.hex] +version = "0.4.3" +default-features = false +features = ["alloc"] + +[dev-dependencies] +hex-literal = { workspace = true, default-features = true } diff --git a/modules/consensus/pharos/verifier/src/error.rs b/modules/consensus/pharos/verifier/src/error.rs new file mode 100644 index 000000000..297b3b30c --- /dev/null +++ b/modules/consensus/pharos/verifier/src/error.rs @@ -0,0 +1,188 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Error types for Pharos verifier. + +use pharos_primitives::BlsPublicKey; +use primitive_types::U256; +use thiserror::Error; + +/// Errors that can occur during Pharos block verification. +#[derive(Debug, Error)] +pub enum Error { + /// The update is for a block that has already been finalized + #[error("Stale update: current finalized block {current} >= update block {update}")] + StaleUpdate { + /// Current finalized block number + current: u64, + /// Update block number + update: u64, + }, + + /// The update block is not in the expected epoch + #[error("Epoch mismatch: update block is in epoch {update_epoch}, expected {expected_epoch}")] + EpochMismatch { + /// The epoch of the update block + update_epoch: u64, + /// The expected epoch (current verifier state epoch) + expected_epoch: u64, + }, + + /// A participating validator is not in the trusted validator set + #[error("Unknown validator with BLS key: {}", hex::encode(&key.as_ref()[..8]))] + UnknownValidator { + /// The unknown validator's BLS public key + key: BlsPublicKey, + }, + + /// Not enough stake participated in signing the block + #[error( + "Insufficient stake: {participating} participated, {required} required (total: {total})" + )] + InsufficientStake { + /// Stake that participated + participating: U256, + /// Required stake (2/3 + 1 of total) + required: U256, + /// Total network stake + total: U256, + }, + + /// Block proof hash from RPC doesn't match computed header hash + #[error("Block proof hash mismatch: computed {computed}, provided {provided}")] + 
BlockProofHashMismatch { computed: primitive_types::H256, provided: primitive_types::H256 }, + + /// No validators participated in signing + #[error("No validators participated in signing")] + NoParticipants, + + /// Duplicate participant key in block proof + #[error("Duplicate participant key in block proof")] + DuplicateParticipant, + + /// BLS signature verification failed + #[error("BLS signature verification failed")] + InvalidSignature, + + /// BLS cryptography error + #[error("BLS error: {0:?}")] + BlsError(bls::errors::BLSError), + + /// Missing validator set proof for epoch boundary block + #[error("Missing validator set proof for epoch boundary block {block_number}")] + MissingValidatorSetProof { + /// Block number that requires a validator set proof + block_number: u64, + }, + + /// Unexpected validator set proof for non-epoch-boundary block + #[error("Unexpected validator set proof for non-epoch-boundary block {block_number}")] + UnexpectedValidatorSetProof { + /// Block number that should not have a validator set proof + block_number: u64, + }, + + /// Storage proof lookup failed + #[error("Storage proof lookup failed")] + StorageProofLookupFailed, + + /// Storage proof verification failed against the state root + #[error("Storage proof verification failed: {0}")] + StorageProofVerificationFailed(#[from] pharos_primitives::spv::Error), + + /// Storage value exceeds maximum size for U256 + #[error("Storage value too large for U256")] + StorageValueTooLarge, + + /// Mismatch between number of storage slots and values + #[error("Slots and values length mismatch: {slots} slots, {values} values")] + SlotValueLengthMismatch { slots: usize, values: usize }, + + /// Not enough storage values provided for validator set + #[error("Insufficient storage values: expected at least {expected}, got {got}")] + InsufficientStorageValues { expected: usize, got: usize }, + + /// Not enough pool IDs provided for validators + #[error("Insufficient pool IDs: expected 
{expected} for {validators} validators, got {got}")] + InsufficientPoolIds { expected: usize, validators: usize, got: usize }, + + /// BLS public key slot value is missing + #[error("Missing BLS public key slot value")] + MissingBlsKeySlot, + + /// BLS key slot is empty + #[error("Empty BLS key slot")] + EmptyBlsKeySlot, + + /// Invalid short string length in BLS key slot + #[error("Invalid short string length in BLS key")] + InvalidBlsStringLength, + + /// BLS key string contains invalid UTF-8 + #[error("Invalid UTF-8 in BLS key string")] + InvalidBlsKeyUtf8, + + /// Long string BLS keys require additional data slots + #[error("Long string BLS key detected - string data slots required in proof")] + LongStringBlsKeyUnsupported, + + /// BLS key hex string is invalid + #[error("Invalid hex encoding in BLS key string")] + InvalidBlsKeyHex, + + /// BLS key has incorrect byte length + #[error("Invalid BLS key length: expected {expected}, got {got}")] + InvalidBlsKeyLength { expected: usize, got: usize }, + + /// Failed to convert BLS key bytes to the expected type + #[error("Failed to convert BLS key bytes")] + BlsKeyConversionFailed, + + /// Validator set contains no validators + #[error("Validator set is empty")] + EmptyValidatorSet, + + /// Computed total stake doesn't match claimed total + #[error("Total stake mismatch: computed {computed}, claimed {claimed}")] + ComputedStakeMismatch { computed: U256, claimed: U256 }, + + /// On-chain totalStake doesn't match sum of individual validator stakes + #[error("Total stake mismatch: computed {computed}, on-chain {on_chain}")] + TotalStakeMismatch { computed: U256, on_chain: U256 }, + + /// Duplicate validator detected in the set + #[error("Duplicate validator in set")] + DuplicateValidator, + + /// Validator has zero stake + #[error("Validator has zero stake")] + ZeroStakeValidator, + + /// Missing required storage value at a specific slot + #[error("Missing required storage value for {field}")] + MissingStorageValue { + 
/// Description of the missing field + field: &'static str, + }, + + /// Incomplete validator proof data + #[error("Incomplete validator proof: expected {expected} values, got {got}")] + IncompleteValidatorProof { + /// Expected number of values + expected: usize, + /// Actual number of values + got: usize, + }, +} diff --git a/modules/consensus/pharos/verifier/src/lib.rs b/modules/consensus/pharos/verifier/src/lib.rs new file mode 100644 index 000000000..73e0e6bf8 --- /dev/null +++ b/modules/consensus/pharos/verifier/src/lib.rs @@ -0,0 +1,245 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Pharos consensus verifier. + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +pub mod error; +pub mod state_proof; + +use error::Error; +use geth_primitives::Header; +use ismp::messaging::Keccak256; +use pharos_primitives::{ + BlockProof, BlsPublicKey, Config, ValidatorSet, VerifierState, VerifierStateUpdate, +}; +use primitive_types::H256; + +/// Domain Separation Tag for Pharos BLS signatures. +pub const PHAROS_BLS_DST: &str = "BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"; + +/// Verifies a Pharos block proof and update the verifier state. 
+pub fn verify_pharos_block( + trusted_state: VerifierState, + update: VerifierStateUpdate, +) -> Result { + let update_block_number = update.block_number(); + let current_block_number = trusted_state.finalized_block_number; + + if update_block_number <= current_block_number { + return Err(Error::StaleUpdate { + current: current_block_number, + update: update_block_number, + }); + } + + let update_epoch = C::compute_epoch(update_block_number); + if update_epoch != trusted_state.current_epoch { + return Err(Error::EpochMismatch { + update_epoch, + expected_epoch: trusted_state.current_epoch, + }); + } + + verify_validator_membership( + &trusted_state.current_validator_set, + &update.block_proof.participant_keys, + )?; + + verify_stake_threshold( + &trusted_state.current_validator_set, + &update.block_proof.participant_keys, + )?; + + let computed_hash = Header::from(&update.header).hash::(); + + if computed_hash != update.block_proof.block_proof_hash { + return Err(Error::BlockProofHashMismatch { + computed: computed_hash, + provided: update.block_proof.block_proof_hash, + }); + } + + verify_bls_signature( + &update.block_proof.participant_keys, + &update.block_proof, + update.block_proof.block_proof_hash, + )?; + + let new_state = if C::is_epoch_boundary(update_block_number) { + // Epoch boundary block must always have validator set proof + let validator_set_proof = update + .validator_set_proof + .ok_or(Error::MissingValidatorSetProof { block_number: update_block_number })?; + + let next_epoch = C::compute_epoch(update_block_number) + 1; + let new_validator_set = state_proof::verify_validator_set_proof::( + update.header.state_root, + &validator_set_proof, + next_epoch, + )?; + + VerifierState { + current_validator_set: new_validator_set, + finalized_block_number: update_block_number, + finalized_hash: computed_hash, + current_epoch: next_epoch, + } + } else { + if update.validator_set_proof.is_some() { + return Err(Error::UnexpectedValidatorSetProof { 
block_number: update_block_number }); + } + + VerifierState { + finalized_block_number: update_block_number, + finalized_hash: computed_hash, + ..trusted_state + } + }; + + Ok(new_state) +} + +/// Verify that all participating validators are members of the trusted validator set. +fn verify_validator_membership( + validator_set: &ValidatorSet, + participants: &[BlsPublicKey], +) -> Result<(), Error> { + let deduped: alloc::collections::BTreeSet<&[u8]> = + participants.iter().map(|k| k.as_ref()).collect(); + if deduped.len() != participants.len() { + return Err(Error::DuplicateParticipant); + } + if let Some(key) = participants.iter().find(|key| !validator_set.contains(key)) { + return Err(Error::UnknownValidator { key: key.clone() }); + } + Ok(()) +} + +/// Verify that participating validators have more than 2/3 of total stake. +fn verify_stake_threshold( + validator_set: &ValidatorSet, + participants: &[BlsPublicKey], +) -> Result<(), Error> { + let participating_stake = validator_set.participating_stake(participants); + let total_stake = validator_set.total_stake; + let required = (total_stake * 2 / 3) + 1; + + if participating_stake >= required { + Ok(()) + } else { + Err(Error::InsufficientStake { + participating: participating_stake, + required, + total: total_stake, + }) + } +} + +/// Verify the BLS aggregate signature. 
+fn verify_bls_signature( + participants: &[BlsPublicKey], + block_proof: &BlockProof, + block_proof_hash: H256, +) -> Result<(), Error> { + if participants.is_empty() { + return Err(Error::NoParticipants); + } + + let aggregate_pubkey = crypto_utils::aggregate_public_keys(participants); + + // The message signed is the block_proof_hash + let message = block_proof_hash.as_bytes().to_vec(); + + let is_valid = bls::verify( + &aggregate_pubkey, + &message, + &block_proof.aggregate_signature, + &PHAROS_BLS_DST.as_bytes().to_vec(), + ); + + if !is_valid { + return Err(Error::InvalidSignature); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use pharos_primitives::{ValidatorInfo, ValidatorSet}; + use primitive_types::U256; + + fn make_key(byte: u8) -> BlsPublicKey { + let mut data = [0u8; 48]; + data[0] = byte; + BlsPublicKey::try_from(data.as_slice()).unwrap() + } + + fn make_validator_set(keys: &[BlsPublicKey]) -> ValidatorSet { + let mut set = ValidatorSet::new(1); + for key in keys { + set.add_validator(ValidatorInfo { + bls_public_key: key.clone(), + pool_id: Default::default(), + stake: U256::from(1000), + }); + } + set + } + + #[test] + fn test_duplicate_participant_keys_rejected() { + let key_a = make_key(1); + let key_b = make_key(2); + let set = make_validator_set(&[key_a.clone(), key_b.clone()]); + + // No duplicates: OK + assert!(verify_validator_membership(&set, &[key_a.clone(), key_b.clone()]).is_ok()); + + // Duplicate key: rejected + let result = verify_validator_membership(&set, &[key_a.clone(), key_a.clone()]); + assert!(matches!(result, Err(Error::DuplicateParticipant))); + } + + #[test] + fn test_unknown_participant_rejected() { + let key_a = make_key(1); + let key_unknown = make_key(99); + let set = make_validator_set(&[key_a.clone()]); + + let result = verify_validator_membership(&set, &[key_unknown]); + assert!(matches!(result, Err(Error::UnknownValidator { .. 
}))); + } + + #[test] + fn test_stake_threshold() { + let keys: Vec = (1..=10).map(make_key).collect(); + let set = make_validator_set(&keys); // 10 validators, 1000 each, total 10000 + + // 7 out of 10 (7000 stake) > 2/3 + 1 (6668): passes + assert!(verify_stake_threshold(&set, &keys[..7]).is_ok()); + + // 6 out of 10 (6000 stake) < 6668: fails + assert!(matches!( + verify_stake_threshold(&set, &keys[..6]), + Err(Error::InsufficientStake { .. }) + )); + } +} diff --git a/modules/consensus/pharos/verifier/src/state_proof.rs b/modules/consensus/pharos/verifier/src/state_proof.rs new file mode 100644 index 000000000..798824df0 --- /dev/null +++ b/modules/consensus/pharos/verifier/src/state_proof.rs @@ -0,0 +1,631 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! State proof verification for Pharos validator sets. +//! +//! This module handles verification of storage proofs for the validator set +//! stored in the staking contract at `0x4100000000000000000000000000000000000000`. +//! +//! Pharos uses a flat trie, so storage slot proofs verify directly against the +//! state root — no separate account proof is needed. +//! +//! ## Verification Steps +//! +//! 1. Recompute expected storage keys from the storage values +//! 2. Verify each storage value against its per-key proof path and the state root +//! 3. 
Decode the verified storage values into a ValidatorSet + +use crate::error::Error; +use alloc::{collections::BTreeMap, vec::Vec}; +use ismp::messaging::Keccak256; +use pharos_primitives::{ + spv, PharosProofNode, ValidatorInfo, ValidatorSet, ValidatorSetProof, STAKING_CONTRACT_ADDRESS, +}; +use primitive_types::{H256, U256}; + +/// This function verifies that the provided validator set is correctly stored +/// in the staking contract at the given block. +/// +/// The `epoch` parameter is the epoch this validator set will be valid for. +/// +/// Verification steps: +/// 1. Recompute expected storage keys from the storage values +/// 2. Verify each storage value against its per-key proof path and the state root +/// 3. Decode the verified storage values into a ValidatorSet +pub fn verify_validator_set_proof( + state_root: H256, + proof: &ValidatorSetProof, + epoch: u64, +) -> Result { + let layout = StakingContractLayout::default(); + + // Recompute expected storage keys from the storage values + let keys = compute_all_storage_keys::(&proof.storage_values, &layout)?; + + // Verify each storage value against its per-key proof path. + // Pharos uses a flat trie — storage proofs verify directly against state_root. + verify_all_storage_proofs(&keys, &proof.storage_values, &proof.storage_proof, &state_root)?; + + // Decode the verified storage values into a ValidatorSet + let decoded_set = decode_validator_set_from_storage::(&proof.storage_values, epoch)?; + + validate_validator_set(&decoded_set)?; + + Ok(decoded_set) +} + +/// Decode validator set from storage values. +/// +/// This function interprets the raw storage values according to the +/// Pharos staking contract's actual storage layout. 
+/// +/// ## Pharos Storage Layout +/// +/// The contract at `0x4100000000000000000000000000000000000000` uses: +/// - Slot 0: validators mapping (mapping(bytes32 => Validator)) +/// - Slot 6: totalStake (uint256) +/// - Slot 21: activePoolSets (EnumerableSet.Bytes32Set) +/// +/// Values are ordered: [totalStake, poolCount, poolId_0..poolId_n, +/// validator_0_bls_header, validator_0_bls_data_0..N_0, validator_0_stake, ...] +/// +/// The number of BLS data slots per validator varies based on whether the key +/// was registered with or without a "0x" prefix (3 or 4 data slots respectively). +fn decode_validator_set_from_storage( + values: &[Vec], + epoch: u64, +) -> Result { + // We need 2 values at minimum: totalStake, activePoolSets length + if values.len() < 2 { + return Err(Error::InsufficientStorageValues { expected: 2, got: values.len() }); + } + + // Parse global state + // Index 0: totalStake + let on_chain_total_stake = decode_u256_from_storage(&values[0])?; + + // Index 1: activePoolSets array length + let validator_count = decode_u256_from_storage(&values[1])?; + + let count = validator_count.low_u64() as usize; + + // Pool IDs start at index 2 (after totalStake, array length) + let pool_set_start = 2; + let pool_ids_end = pool_set_start + count; + + if values.len() < pool_ids_end { + return Err(Error::InsufficientPoolIds { + expected: pool_ids_end, + validators: count, + got: values.len(), + }); + } + + let mut validator_set = ValidatorSet::new(epoch); + + let mut idx = pool_ids_end; + for i in 0..count { + // Pool ID from activePoolSets array + let pool_id = { + let v = &values[pool_set_start + i]; + let mut bytes = [0u8; 32]; + if v.len() <= 32 { + bytes[32 - v.len()..].copy_from_slice(v); + } + H256::from(bytes) + }; + + // BLS header at current index + if idx >= values.len() { + return Err(Error::InsufficientStorageValues { expected: idx + 1, got: values.len() }); + } + let data_slots = bls_data_slots_from_header(&values[idx])?; + + let 
bls_string_slot = &Some(values[idx].clone()); + idx += 1; + + // BLS data slots (dynamic count) + if idx + data_slots > values.len() { + return Err(Error::InsufficientStorageValues { + expected: idx + data_slots, + got: values.len(), + }); + } + let bls_data_slots: Vec>> = + values[idx..idx + data_slots].iter().map(|v| Some(v.clone())).collect(); + idx += data_slots; + + let bls_key = decode_bls_key_from_string_slot(bls_string_slot, Some(&bls_data_slots))?; + + // totalStake + if idx >= values.len() { + return Err(Error::InsufficientStorageValues { expected: idx + 1, got: values.len() }); + } + let stake = decode_u256_from_storage(&values[idx])?; + idx += 1; + + let validator = ValidatorInfo { bls_public_key: bls_key, pool_id, stake }; + + if !validator_set.add_validator(validator) { + return Err(Error::DuplicateValidator); + } + } + + if validator_set.total_stake != on_chain_total_stake { + return Err(Error::TotalStakeMismatch { + computed: validator_set.total_stake, + on_chain: on_chain_total_stake, + }); + } + + Ok(validator_set) +} + +/// Decode BLS public key from a Solidity string storage slot. +/// +/// In Solidity, strings are stored as: +/// - Short strings (< 32 bytes): data is stored directly in the slot, length in lowest byte +/// - Long strings (>= 32 bytes): slot contains (length * 2 + 1), data at keccak256(slot) +/// +/// The BLS public key is a 48-byte value, stored as a hex string. 
The number of +/// data slots varies based on whether the key includes a "0x" prefix: +/// - With prefix: 98 chars → ceil(98/32) = 4 data slots +/// - Without prefix: 96 chars → ceil(96/32) = 3 data slots +fn decode_bls_key_from_string_slot( + header_value: &Option>, + data_slots: Option<&[Option>]>, +) -> Result { + use alloc::string::String; + + let header = header_value.as_ref().ok_or(Error::MissingBlsKeySlot)?; + + if header.is_empty() { + return Err(Error::EmptyBlsKeySlot); + } + + let header_val = decode_u256_from_storage(header)?; + let header_bytes = header_val.to_big_endian(); + let lowest_byte = header_bytes[31]; + + let bls_hex: String = if lowest_byte & 1 == 0 { + // Short string: data is in the slot, length = lowest_byte / 2 + let len = (lowest_byte / 2) as usize; + if len > 31 { + return Err(Error::InvalidBlsStringLength); + } + // String data is stored in the high bytes of the slot + String::from_utf8(header_bytes[..len].to_vec()).map_err(|_| Error::InvalidBlsKeyUtf8)? + } else { + // Long string: header contains (length * 2 + 1) + let length = (header_val - 1) / 2; + let str_len = length.low_u64() as usize; + + // For BLS keys, we expect a 96 or 98 character hex string, + // requiring 3 (ceil(96/32)) or 4 (ceil(98/32)) data slots respectively + let data_slots = data_slots.ok_or(Error::LongStringBlsKeyUnsupported)?; + + let slots_needed = (str_len + 31) / 32; + if data_slots.len() < slots_needed { + return Err(Error::InsufficientStorageValues { + expected: slots_needed, + got: data_slots.len(), + }); + } + + let mut string_data = Vec::with_capacity(str_len); + for (i, slot_value) in data_slots.iter().take(slots_needed).enumerate() { + let slot_data = slot_value.as_ref().ok_or(Error::MissingBlsKeySlot)?; + let decoded = decode_u256_from_storage(slot_data)?; + let bytes = decoded.to_big_endian(); + + let remaining = str_len - (i * 32); + let take = remaining.min(32); + string_data.extend_from_slice(&bytes[..take]); + } + + String::from_utf8(string_data).map_err(|_| 
Error::InvalidBlsKeyUtf8)? + }; + + let bls_hex = bls_hex.trim_start_matches("0x"); + let bls_bytes = hex::decode(bls_hex).map_err(|_| Error::InvalidBlsKeyHex)?; + + // The staking contract may store a prefix before the 48-byte BLS key. + // Extract the last 48 bytes which contain the actual G1 compressed key. + if bls_bytes.len() < 48 { + return Err(Error::InvalidBlsKeyLength { expected: 48, got: bls_bytes.len() }); + } + + let key_start = bls_bytes.len() - 48; + bls_bytes[key_start..].try_into().map_err(|_| Error::BlsKeyConversionFailed) +} + +/// Recompute the expected storage keys in the same order as `storage_values`. +/// +/// The order matches the prover's output: +/// [totalStake, activePoolSets length, pool_id_0..n, +/// validator_0_bls_header, validator_0_bls_data_0..N_0, validator_0_stake, ...] +/// +/// The number of BLS data slots per validator is dynamically determined from +/// each validator's BLS string header value in `storage_values`. +fn compute_all_storage_keys( + storage_values: &[Vec], + layout: &StakingContractLayout, +) -> Result, Error> { + if storage_values.len() < 2 { + return Err(Error::InsufficientStorageValues { expected: 2, got: storage_values.len() }); + } + + let mut keys = Vec::new(); + + // Index 0: totalStake + keys.push(layout.raw_slot_key(layout.total_stake_slot)); + + // Index 1: activePoolSets length + keys.push(layout.raw_slot_key(layout.active_pool_set_slot)); + + // Parse validator count from storage_values[1] + let count_val = decode_u256_from_storage(&storage_values[1])?; + let count = count_val.low_u64() as usize; + + // Pool ID array element keys + for i in 0..count { + keys.push(layout.array_element_key::(layout.active_pool_set_slot, i as u64)); + } + + // Extract pool IDs from storage values to compute validator keys + let pool_set_start = 2; + let pool_ids_end = pool_set_start + count; + + if storage_values.len() < pool_ids_end { + return Err(Error::InsufficientPoolIds { + expected: pool_ids_end, + validators: 
count, + got: storage_values.len(), + }); + } + + // For each validator, dynamically determine the BLS data slot count + // from the header value in storage_values + let mut idx = pool_ids_end; + for i in 0..count { + let v = &storage_values[pool_set_start + i]; + let mut bytes = [0u8; 32]; + if v.len() <= 32 { + bytes[32 - v.len()..].copy_from_slice(v); + } + let pool_id = H256::from(bytes); + + // The BLS header value is at the current index + if idx >= storage_values.len() { + return Err(Error::InsufficientStorageValues { + expected: idx + 1, + got: storage_values.len(), + }); + } + let data_slots = bls_data_slots_from_header(&storage_values[idx])?; + + let validator_keys = layout.get_validator_keys::(&pool_id, data_slots); + keys.extend(validator_keys); + + // Advance index: 1 (header) + data_slots + 1 (stake) + idx += 1 + data_slots + 1; + } + + Ok(keys) +} + +/// Verify each storage value against its per-key proof path in the storage trie. +fn verify_all_storage_proofs( + keys: &[H256], + values: &[Vec], + storage_proof: &BTreeMap>, + storage_hash: &H256, +) -> Result<(), Error> { + if keys.len() != values.len() { + return Err(Error::SlotValueLengthMismatch { slots: keys.len(), values: values.len() }); + } + + let address: [u8; 20] = STAKING_CONTRACT_ADDRESS.0 .0; + + for (key, value) in keys.iter().zip(values.iter()) { + let proof_nodes = storage_proof + .get(key) + .ok_or(Error::MissingStorageValue { field: "storage proof for key" })?; + + let mut padded_value = [0u8; 32]; + if value.len() <= 32 { + padded_value[32 - value.len()..].copy_from_slice(value); + } else { + return Err(Error::StorageValueTooLarge); + } + + spv::verify_proof( + proof_nodes, + &spv::build_storage_key(&address, &key.0), + &padded_value, + &storage_hash.0, + )?; + } + + Ok(()) +} + +/// Validate the internal consistency of a validator set. 
+pub fn validate_validator_set(validator_set: &ValidatorSet) -> Result<(), Error> { + if validator_set.is_empty() { + return Err(Error::EmptyValidatorSet); + } + + for validator in validator_set.validators.values() { + if validator.stake.is_zero() { + return Err(Error::ZeroStakeValidator); + } + } + + Ok(()) +} + +/// Storage layout information for the Pharos staking contract. +/// +/// Based on the actual Pharos staking contract at `0x4100000000000000000000000000000000000000`. +/// +/// ## Contract Storage Layout (StakingStorageV1) +/// +/// ```solidity +/// mapping(bytes32 => Validator) public validators; // slot 0 +/// bytes32[] public activePoolIds; // slot 1 +/// bytes32[] public pendingAddPoolIds; // slot 2 +/// bytes32[] public pendingUpdatePoolIds; // slot 3 +/// bytes32[] public pendingExitPoolIds; // slot 4 +/// uint256 public currentEpoch; // slot 5 +/// uint256 public totalStake; // slot 6 +/// IChainConfig public cfg; // slot 7 +/// mapping(address => uint256) public pendingWithdrawStakes; // slot 8 +/// uint256 public totalSupply; // slot 9 +/// uint256 public currentInflationRate; // slot 10 +/// uint256 public lastInflationAdjustmentTime; // slot 11 +/// uint256 public lastInflationTotalSupplySnapshot; // slot 12 +/// address internal implAddress; // slot 13 +/// ``` +/// +/// ## Contract Storage Layout (StakingStorageV2) +/// +/// ```solidity +/// uint256 lastEpochStartTime; // slot 14 +/// mapping(bytes32 => mapping(address => Delegator)) delegators; // slot 15 +/// mapping(bytes32 => mapping(address => bool)) validatorWhitelists; // slot 16 +/// mapping(bytes32 => uint256) accumulatedRewardPerShares; // slot 17 +/// mapping(bytes32 => uint256) commissionRates; // slot 18 +/// mapping(bytes32 => bool) delegationEnabledMapping; // slot 19 +/// mapping(bytes32 => uint256) delegatorCounts; // slot 20 +/// EnumerableSet.Bytes32Set activePoolSets; // slot 21-22 +/// EnumerableSet.Bytes32Set pendingAddPoolSets; // slot 24-25 +/// 
EnumerableSet.Bytes32Set pendingUpdatePoolSets; // slot 26-27 +/// EnumerableSet.Bytes32Set pendingExitPoolSets; // slot 28-29 +/// ``` +/// +/// The contract currently uses the V2 layout. Active pool IDs are stored +/// in `activePoolSets` at slot 21 as an `EnumerableSet.Bytes32Set` +/// (slot 21 = `_inner._values` array, slot 22 = `_inner._positions` mapping). +/// +/// ## Validator Struct +/// +/// ```solidity +/// struct Validator { +/// string description; // offset 0 +/// string publicKey; // offset 1 +/// string publicKeyPop; // offset 2 +/// string blsPublicKey; // offset 3 +/// string blsPublicKeyPop; // offset 4 +/// string endpoint; // offset 5 +/// uint8 status; // offset 6 +/// bytes32 poolId; // offset 7 +/// uint256 totalStake; // offset 8 +/// address owner; // offset 9 +/// uint256 stakeSnapshot; // offset 10 +/// uint256 pendingWithdrawStake; // offset 11 +/// uint8 pendingWithdrawWindow; // offset 12 +/// } +/// ``` +#[derive(Debug, Clone)] +pub struct StakingContractLayout { + /// Storage slot for the validators mapping + pub validators_mapping_slot: u64, + /// Storage slot for activePoolSets (EnumerableSet._inner._values) + pub active_pool_set_slot: u64, + /// Storage slot for totalStake + pub total_stake_slot: u64, +} + +/// Offsets within the Validator struct for each field. 
+#[derive(Debug, Clone, Copy)] +pub struct ValidatorStructOffsets { + /// Offset for description (string) + pub description: u64, + /// Offset for publicKey (string) + pub public_key: u64, + /// Offset for publicKeyPop (string) + pub public_key_pop: u64, + /// Offset for blsPublicKey (string) + pub bls_public_key: u64, + /// Offset for blsPublicKeyPop (string) + pub bls_public_key_pop: u64, + /// Offset for endpoint (string) + pub endpoint: u64, + /// Offset for status (uint8) + pub status: u64, + /// Offset for poolId (bytes32) + pub pool_id: u64, + /// Offset for totalStake (uint256) + pub total_stake: u64, + /// Offset for owner (address) + pub owner: u64, + /// Offset for stakeSnapshot (uint256) + pub stake_snapshot: u64, + /// Offset for pendingWithdrawStake (uint256) + pub pending_withdraw_stake: u64, + /// Offset for pendingWithdrawWindow (uint8) + pub pending_withdraw_window: u64, +} + +impl Default for ValidatorStructOffsets { + fn default() -> Self { + Self { + description: 0, + public_key: 1, + public_key_pop: 2, + bls_public_key: 3, + bls_public_key_pop: 4, + endpoint: 5, + status: 6, + pool_id: 7, + total_stake: 8, + owner: 9, + stake_snapshot: 10, + pending_withdraw_stake: 11, + pending_withdraw_window: 12, + } + } +} + +impl Default for StakingContractLayout { + fn default() -> Self { + Self { validators_mapping_slot: 0, active_pool_set_slot: 21, total_stake_slot: 6 } + } +} + +impl StakingContractLayout { + /// Calculate the raw storage key for a simple slot (no hashing). + pub fn raw_slot_key(&self, slot: u64) -> H256 { + H256::from_low_u64_be(slot) + } + + /// Calculate the storage key for a dynamic array element. + pub fn array_element_key(&self, base_slot: u64, index: u64) -> H256 { + self.array_element_key_with(base_slot, index, H::keccak256) + } + + /// Non-generic variant that accepts a concrete hash function. 
+ pub fn array_element_key_with( + &self, + base_slot: u64, + index: u64, + keccak: impl FnOnce(&[u8]) -> H256, + ) -> H256 { + let slot_bytes = U256::from(base_slot).to_big_endian(); + let base_key = keccak(&slot_bytes); + let base_pos = U256::from_big_endian(&base_key.0); + let element_pos = base_pos + U256::from(index); + H256(element_pos.to_big_endian()) + } + + /// Calculate the base storage slot for a validator in the mapping. + pub fn validator_base_slot(&self, pool_id: &H256) -> H256 { + let mut data = [0u8; 64]; + data[..32].copy_from_slice(pool_id.as_bytes()); + data[32..64].copy_from_slice(&U256::from(self.validators_mapping_slot).to_big_endian()); + H::keccak256(&data) + } + + /// Calculate the storage slot for a specific field within a Validator struct. + pub fn validator_field_slot(&self, pool_id: &H256, field_offset: u64) -> H256 { + let base = self.validator_base_slot::(pool_id); + let base_pos = U256::from_big_endian(base.as_bytes()); + let field_pos = base_pos + U256::from(field_offset); + H256(field_pos.to_big_endian()) + } + + /// Calculate the storage slot for string data. + pub fn string_data_slot(&self, string_slot: &H256) -> H256 { + H::keccak256(string_slot.as_bytes()) + } + + /// Get storage keys for a specific validator's data. + /// + /// Returns keys for: + /// - BLS public key string slot (offset 3) + /// - BLS public key data slots (dynamic count based on string length) + /// - totalStake (offset 8) + /// + /// The `bls_data_slot_count` parameter specifies how many data slots to include + /// for the BLS public key string. 
This is derived from the string header value: + /// - Keys with "0x" prefix (98 chars): ceil(98/32) = 4 slots + /// - Keys without prefix (96 chars): ceil(96/32) = 3 slots + pub fn get_validator_keys( + &self, + pool_id: &H256, + bls_data_slot_count: usize, + ) -> Vec { + let offsets = ValidatorStructOffsets::default(); + let mut keys = Vec::new(); + + // BLS public key string slot (stores length for long strings) + let bls_string_slot = self.validator_field_slot::(pool_id, offsets.bls_public_key); + keys.push(bls_string_slot); + + // BLS public key data slots (for long strings) + // Data is stored at keccak256(string_slot) for `bls_data_slot_count` slots + let bls_data_base = self.string_data_slot::(&bls_string_slot); + let bls_data_base_pos = U256::from_big_endian(bls_data_base.as_bytes()); + for i in 0..bls_data_slot_count { + let slot_pos = bls_data_base_pos + U256::from(i); + keys.push(H256(slot_pos.to_big_endian())); + } + + // totalStake field + keys.push(self.validator_field_slot::(pool_id, offsets.total_stake)); + + keys + } +} + +/// Determine the number of BLS data slots from the Solidity string header value. +/// +/// For long strings (>= 32 bytes), the header slot contains `length * 2 + 1`. +/// The actual byte length is `(header_value - 1) / 2`, and the number of 32-byte +/// data slots is `ceil(length / 32)`. +/// +/// For short strings (< 32 bytes), the data is stored directly in the header slot +/// and no additional data slots are needed (returns 0). 
+pub fn bls_data_slots_from_header(header_value: &[u8]) -> Result { + let header_val = decode_u256_from_storage(header_value)?; + let header_bytes = header_val.to_big_endian(); + let lowest_byte = header_bytes[31]; + + if lowest_byte & 1 == 0 { + // Short string - data is in the header itself + Ok(0) + } else { + // Long string - header = length * 2 + 1 + let length = (header_val - 1) / 2; + let str_len = length.low_u64() as usize; + Ok((str_len + 31) / 32) + } +} + +/// Decode a U256 value from raw big-endian storage bytes. +pub fn decode_u256_from_storage(value: &[u8]) -> Result { + if value.is_empty() { + return Ok(U256::zero()); + } + + if value.len() <= 32 { + let mut padded = [0u8; 32]; + padded[32 - value.len()..].copy_from_slice(value); + Ok(U256::from_big_endian(&padded)) + } else { + Err(Error::StorageValueTooLarge) + } +} diff --git a/modules/consensus/sync-committee/primitives/Cargo.toml b/modules/consensus/sync-committee/primitives/Cargo.toml index 365d5065d..4f5104aab 100644 --- a/modules/consensus/sync-committee/primitives/Cargo.toml +++ b/modules/consensus/sync-committee/primitives/Cargo.toml @@ -16,6 +16,7 @@ serde = { workspace = true, optional = true, features = ["derive"] } hex = { workspace = true, default-features = false, features = ["alloc"] } anyhow = { workspace = true, default-features = false } serde-hex-utils = { workspace = true, default-features = false } +crypto-utils = { workspace = true, default-features = false } ssz-rs = { git = "https://github.com/polytope-labs/ssz-rs", branch = "main", default-features = false } @@ -31,5 +32,6 @@ std = [ "primitive-types/std", "serde", "serde-hex-utils/std", + "crypto-utils/std", ] nofulu = [] diff --git a/modules/consensus/sync-committee/primitives/src/ssz/mod.rs b/modules/consensus/sync-committee/primitives/src/ssz/mod.rs index ebeaa9b3b..6f071568e 100644 --- a/modules/consensus/sync-committee/primitives/src/ssz/mod.rs +++ 
b/modules/consensus/sync-committee/primitives/src/ssz/mod.rs @@ -1,5 +1,4 @@ mod byte_list; -mod byte_vector; use core::fmt; fn write_bytes_to_lower_hex>(f: &mut fmt::Formatter<'_>, data: T) -> fmt::Result { @@ -13,4 +12,4 @@ fn write_bytes_to_lower_hex>(f: &mut fmt::Formatter<'_>, data: T) } pub use byte_list::ByteList; -pub use byte_vector::ByteVector; +pub use crypto_utils::ssz::ByteVector; diff --git a/modules/ismp/clients/pharos/Cargo.toml b/modules/ismp/clients/pharos/Cargo.toml new file mode 100644 index 000000000..275063f2d --- /dev/null +++ b/modules/ismp/clients/pharos/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "ismp-pharos" +version = "0.1.0" +edition = "2021" +description = "ISMP Consensus Client for the Pharos Network" +authors = ["Polytope Labs "] +publish = false + +[dependencies] +log = { workspace = true, default-features = false } +anyhow = { workspace = true, default-features = false } +codec = { workspace = true, default-features = false } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } + +ismp = { workspace = true } +pharos-verifier = { workspace = true } +pharos-primitives = { workspace = true } +sync-committee-primitives = { workspace = true } +geth-primitives = { workspace = true } +pharos-state-machine = { workspace = true } +pallet-ismp-host-executive = { workspace = true } +pallet-ismp = { workspace = true } + +[dependencies.polkadot-sdk] +workspace = true +features = ["sp-core"] + +[features] +default = ["std"] +std = [ + "log/std", + "anyhow/std", + "polkadot-sdk/std", + "codec/std", + "scale-info/std", + "pharos-verifier/std", + "pharos-primitives/std", + "ismp/std", + "sync-committee-primitives/std", + "geth-primitives/std", + "pharos-state-machine/std", + "pallet-ismp-host-executive/std", + "pallet-ismp/std", +] +try-runtime = ["polkadot-sdk/try-runtime"] diff --git a/modules/ismp/clients/pharos/src/lib.rs b/modules/ismp/clients/pharos/src/lib.rs new file mode 100644 index 000000000..71ed4c16b 
--- /dev/null +++ b/modules/ismp/clients/pharos/src/lib.rs @@ -0,0 +1,214 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! ISMP Consensus Client for Pharos Network. + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +use alloc::{boxed::Box, collections::BTreeMap, string::ToString, vec, vec::Vec}; +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use geth_primitives::Header; +use ismp::{ + consensus::{ + ConsensusClient, ConsensusClientId, ConsensusStateId, StateCommitment, StateMachineClient, + StateMachineId, + }, + error::Error, + host::{IsmpHost, StateMachine}, + messaging::StateCommitmentHeight, +}; +pub use pharos_primitives::{Mainnet, Testnet}; +use pharos_primitives::{ + ValidatorSet, VerifierState, VerifierStateUpdate, PHAROS_ATLANTIC_CHAIN_ID, + PHAROS_MAINNET_CHAIN_ID, +}; +use pharos_state_machine::PharosStateMachine; +use pharos_verifier::verify_pharos_block; +use polkadot_sdk::*; +use sp_core::H256; + +/// Consensus state ID for Pharos +pub const PHAROS_CONSENSUS_CLIENT_ID: ConsensusStateId = *b"PHAR"; + +/// Consensus state for Pharos light client. 
+#[derive(codec::Encode, codec::Decode, Debug, Default, PartialEq, Eq, Clone)] +pub struct ConsensusState { + pub current_validators: ValidatorSet, + pub finalized_height: u64, + pub finalized_hash: H256, + pub current_epoch: u64, + pub chain_id: u32, +} + +impl From for VerifierState { + fn from(state: ConsensusState) -> Self { + VerifierState { + current_validator_set: state.current_validators, + finalized_block_number: state.finalized_height, + finalized_hash: state.finalized_hash, + current_epoch: state.current_epoch, + } + } +} + +/// The Pharos consensus client. +pub struct PharosClient< + H: IsmpHost, + T: pallet_ismp_host_executive::Config, + C: pharos_primitives::Config, +>(PhantomData<(H, T, C)>); + +impl Default + for PharosClient +{ + fn default() -> Self { + Self(PhantomData) + } +} + +impl Clone + for PharosClient +{ + fn clone(&self) -> Self { + Self(PhantomData) + } +} + +impl< + H: IsmpHost + Send + Sync + Default + 'static, + T: pallet_ismp_host_executive::Config, + C: pharos_primitives::Config, + > ConsensusClient for PharosClient +{ + fn verify_consensus( + &self, + _host: &dyn IsmpHost, + consensus_state_id: ConsensusStateId, + trusted_consensus_state: Vec, + proof: Vec, + ) -> Result<(Vec, ismp::consensus::VerifiedCommitments), Error> { + let update = VerifierStateUpdate::decode(&mut &proof[..]) + .map_err(|e| Error::AnyHow(anyhow::anyhow!("{:?}", e).into()))?; + + let consensus_state = + ConsensusState::decode(&mut &trusted_consensus_state[..]).map_err(|e| { + Error::AnyHow( + anyhow::anyhow!("Cannot decode trusted consensus state: {:?}", e).into(), + ) + })?; + + let trusted_state: VerifierState = consensus_state.clone().into(); + + let new_state = verify_pharos_block::(trusted_state, update.clone()) + .map_err(|e| Error::AnyHow(anyhow::Error::from(e).into()))?; + + let state_commitment = StateCommitmentHeight { + commitment: StateCommitment { + timestamp: update.header.timestamp, + overlay_root: None, + state_root: update.header.state_root, 
+ }, + height: new_state.finalized_block_number, + }; + + let new_consensus_state = ConsensusState { + current_validators: new_state.current_validator_set, + finalized_height: new_state.finalized_block_number, + finalized_hash: new_state.finalized_hash, + current_epoch: new_state.current_epoch, + chain_id: consensus_state.chain_id, + }; + + let mut state_machine_map: BTreeMap> = + BTreeMap::new(); + state_machine_map.insert( + StateMachineId { + state_id: StateMachine::Evm(new_consensus_state.chain_id), + consensus_state_id, + }, + vec![state_commitment], + ); + + Ok((new_consensus_state.encode(), state_machine_map)) + } + + fn verify_fraud_proof( + &self, + _host: &dyn IsmpHost, + trusted_consensus_state: Vec, + proof_1: Vec, + proof_2: Vec, + ) -> Result<(), Error> { + let update_1 = VerifierStateUpdate::decode(&mut &proof_1[..]).map_err(|e| { + Error::AnyHow( + anyhow::anyhow!("Cannot decode pharos update for proof 1: {:?}", e).into(), + ) + })?; + + let update_2 = VerifierStateUpdate::decode(&mut &proof_2[..]).map_err(|e| { + Error::AnyHow( + anyhow::anyhow!("Cannot decode pharos update for proof 2: {:?}", e).into(), + ) + })?; + + let header_1 = &update_1.header; + let header_2 = &update_2.header; + + if header_1.number != header_2.number { + return Err(Error::Custom("Invalid fraud proof: different block numbers".to_string())); + } + + let header_1_hash = Header::from(header_1).hash::(); + let header_2_hash = Header::from(header_2).hash::(); + + if header_1_hash == header_2_hash { + return Err(Error::Custom("Invalid fraud proof: identical headers".to_string())); + } + + let consensus_state = + ConsensusState::decode(&mut &trusted_consensus_state[..]).map_err(|e| { + Error::AnyHow( + anyhow::anyhow!("Cannot decode trusted consensus state: {:?}", e).into(), + ) + })?; + + let trusted_state: VerifierState = consensus_state.into(); + + verify_pharos_block::(trusted_state.clone(), update_1) + .map_err(|e| Error::AnyHow(anyhow::Error::from(e).into()))?; + + 
verify_pharos_block::(trusted_state, update_2) + .map_err(|e| Error::AnyHow(anyhow::Error::from(e).into()))?; + + Ok(()) + } + + fn consensus_client_id(&self) -> ConsensusClientId { + PHAROS_CONSENSUS_CLIENT_ID + } + + fn state_machine(&self, id: StateMachine) -> Result, Error> { + match id { + StateMachine::Evm(chain_id) + if chain_id == PHAROS_MAINNET_CHAIN_ID || chain_id == PHAROS_ATLANTIC_CHAIN_ID => + Ok(Box::new(>::default())), + state_machine => + Err(Error::Custom(alloc::format!("Unsupported state machine: {state_machine:?}"))), + } + } +} diff --git a/modules/ismp/core/src/error.rs b/modules/ismp/core/src/error.rs index 6bd64ab43..31f4247b8 100644 --- a/modules/ismp/core/src/error.rs +++ b/modules/ismp/core/src/error.rs @@ -20,7 +20,10 @@ use crate::{ consensus::{ConsensusClientId, ConsensusStateId, StateMachineHeight, StateMachineId}, events::Meta, }; -use alloc::{string::String, vec::Vec}; +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; use codec::{Decode, Encode}; use core::time::Duration; use scale_info::TypeInfo; @@ -214,4 +217,51 @@ pub enum Error { }, /// Error decoding signature SignatureDecodingFailed, + /// Anyhow error: {0} + AnyHow(AnyhowError), +} + +/// SCALE-compatible wrapper around [`anyhow::Error`]. 
+#[derive(Debug)] +pub struct AnyhowError(pub anyhow::Error); + +impl core::fmt::Display for AnyhowError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::Display::fmt(&self.0, f) + } +} + +impl PartialEq for AnyhowError { + fn eq(&self, other: &Self) -> bool { + self.0.to_string() == other.0.to_string() + } +} + +impl Eq for AnyhowError {} + +impl Encode for AnyhowError { + fn encode_to(&self, dest: &mut W) { + self.0.to_string().encode_to(dest) + } +} + +impl Decode for AnyhowError { + fn decode(input: &mut I) -> Result { + let s = String::decode(input)?; + Ok(Self(anyhow::Error::msg(s))) + } +} + +impl TypeInfo for AnyhowError { + type Identity = String; + + fn type_info() -> scale_info::Type { + String::type_info() + } +} + +impl From for AnyhowError { + fn from(e: anyhow::Error) -> Self { + Self(e) + } } diff --git a/modules/ismp/state-machines/pharos/Cargo.toml b/modules/ismp/state-machines/pharos/Cargo.toml new file mode 100644 index 000000000..377461f41 --- /dev/null +++ b/modules/ismp/state-machines/pharos/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "pharos-state-machine" +version = "0.1.0" +edition = "2021" +description = "Pharos state machine verification using hexary hash tree (SHA-256) proofs" +authors = ["Polytope Labs "] +publish = false + +[dependencies] +ismp = { workspace = true } +pharos-primitives = { workspace = true } +pallet-ismp-host-executive = { workspace = true } +geth-primitives = { workspace = true } +evm-state-machine = { path = "../evm", default-features = false } + +anyhow = { workspace = true, default-features = false } +codec = { workspace = true, default-features = false } +primitive-types = { workspace = true } +ethabi = { workspace = true } +hex-literal = { workspace = true } +alloy-rlp = { workspace = true } + +[dependencies.polkadot-sdk] +workspace = true +features = ["sp-core"] + +[features] +default = ["std"] +std = [ + "codec/std", + "ismp/std", + "pharos-primitives/std", + 
"pallet-ismp-host-executive/std", + "geth-primitives/std", + "evm-state-machine/std", + "primitive-types/std", + "ethabi/std", + "alloy-rlp/std", + "anyhow/std", + "polkadot-sdk/std", +] diff --git a/modules/ismp/state-machines/pharos/src/lib.rs b/modules/ismp/state-machines/pharos/src/lib.rs new file mode 100644 index 000000000..b7dc60bdf --- /dev/null +++ b/modules/ismp/state-machines/pharos/src/lib.rs @@ -0,0 +1,257 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Pharos state machine verification. +//! +//! Uses Pharos hexary hash tree proofs with SHA-256 hashing instead of +//! Ethereum's Merkle-Patricia Trie with Keccak-256. + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +use alloc::{collections::BTreeMap, string::ToString, vec::Vec}; +use codec::{Decode, Encode}; +use evm_state_machine::{req_res_commitment_key, req_res_receipt_keys}; +use ismp::{ + consensus::{StateCommitment, StateMachineClient}, + error::Error, + host::IsmpHost, + messaging::{Keccak256, Proof}, + router::RequestResponse, +}; +use pallet_ismp_host_executive::EvmHosts; +use pharos_primitives::{spv, NonExistenceProof, PharosProofNode}; +use primitive_types::{H160, H256}; + +/// Account proof data for a 20-byte address query. 
+#[derive(Encode, Decode, Clone)] +pub struct AccountProofData { + /// Proof nodes from MSU root to the account leaf + pub proof_nodes: Vec, + /// RLP-encoded account value (nonce, balance, storage_root, code_hash) + pub raw_value: Vec, +} + +/// Pharos-specific state proof (replaces EvmStateProof). +/// +/// Contains Pharos hexary hash tree proof data with SHA-256 hashing. +/// Pharos uses a flat trie where storage proofs verify directly against +/// the state_root, so no separate account proof is needed for storage queries. +#[derive(Encode, Decode, Clone)] +pub struct PharosStateProof { + /// Map of storage key (slot hash) to storage proof nodes + pub storage_proof: BTreeMap, Vec>, + /// Map of storage key (slot hash) to the 32-byte padded storage value + pub storage_values: BTreeMap, Vec>, + /// Map of storage key (slot hash) to non-existence proof for absent keys + pub non_existence_proofs: BTreeMap, NonExistenceProof>, + /// Map of account address (20 bytes) to account proof data + pub account_proofs: BTreeMap, AccountProofData>, +} + +/// Pharos state machine client for ISMP state proof verification. 
+pub struct PharosStateMachine( + core::marker::PhantomData<(H, T)>, +); + +impl Default for PharosStateMachine { + fn default() -> Self { + Self(core::marker::PhantomData) + } +} + +impl Clone for PharosStateMachine { + fn clone(&self) -> Self { + PharosStateMachine::::default() + } +} + +impl StateMachineClient + for PharosStateMachine +{ + fn verify_membership( + &self, + _host: &dyn IsmpHost, + item: RequestResponse, + root: StateCommitment, + proof: &Proof, + ) -> Result<(), Error> { + let contract_address = EvmHosts::::get(&proof.height.id.state_id) + .ok_or_else(|| Error::Custom("Ismp contract address not found".to_string()))?; + verify_membership::(item, root, proof, contract_address) + } + + fn receipts_state_trie_key(&self, items: RequestResponse) -> Vec> { + req_res_receipt_keys::(items) + } + + fn verify_state_proof( + &self, + _host: &dyn IsmpHost, + keys: Vec>, + root: StateCommitment, + proof: &Proof, + ) -> Result, Option>>, Error> { + let ismp_address = EvmHosts::::get(&proof.height.id.state_id) + .ok_or_else(|| Error::Custom("Ismp contract address not found".to_string()))?; + verify_state_proof::(keys, root, proof, ismp_address) + } +} + +/// Decode a PharosStateProof from the proof bytes. +fn decode_pharos_state_proof(proof: &Proof) -> Result { + PharosStateProof::decode(&mut &proof.proof[..]).map_err(|e| { + Error::AnyHow(anyhow::anyhow!("Cannot decode pharos state proof: {:?}", e).into()) + }) +} + +/// Verify membership of ISMP commitments in the Pharos state. +pub fn verify_membership( + item: RequestResponse, + root: StateCommitment, + proof: &Proof, + contract_address: H160, +) -> Result<(), Error> { + let pharos_proof = decode_pharos_state_proof(proof)?; + + let state_root = H256::from_slice(&root.state_root[..]); + let address: [u8; 20] = contract_address.0; + + let commitment_keys = req_res_commitment_key::(item, |k| k.to_vec()); + + // Pharos uses a flat trie — storage proofs verify directly against state_root. 
+ for slot_hash in commitment_keys { + let storage_proof_nodes = pharos_proof + .storage_proof + .get(&slot_hash) + .ok_or_else(|| Error::Custom("Missing storage proof for commitment key".to_string()))?; + + let slot_key: [u8; 32] = slot_hash.try_into().map_err(|e: Vec| { + Error::Custom(alloc::format!("Invalid slot hash length: expected 32, got {}", e.len())) + })?; + + spv::verify_membership_proof( + storage_proof_nodes, + &spv::build_storage_key(&address, &slot_key), + &state_root.0, + ) + .map_err(|e| Error::AnyHow(anyhow::Error::from(e).into()))?; + } + + Ok(()) +} + +/// Verify state proof and return key-value map. +pub fn verify_state_proof( + keys: Vec>, + root: StateCommitment, + proof: &Proof, + ismp_address: H160, +) -> Result, Option>>, Error> { + let pharos_proof = decode_pharos_state_proof(proof)?; + + let state_root = H256::from_slice(&root.state_root[..]); + + // Pharos uses a flat trie — storage proofs verify directly against state_root. + let mut map = BTreeMap::new(); + + for key in keys { + let (contract_addr, slot_hash) = if key.len() == 52 { + // First 20 bytes = contract address, last 32 = slot hash + let addr = H160::from_slice(&key[..20]); + (addr, key[20..].to_vec()) + } else if key.len() == 32 { + // Direct slot hash for the ISMP host contract + (ismp_address, key.clone()) + } else if key.len() == 20 { + // Account query which verifies account proof and return raw account value + let address: [u8; 20] = key.clone().try_into().map_err(|e: Vec| { + Error::Custom(alloc::format!("Invalid address: expected 20 bytes, got {}", e.len())) + })?; + let account_data = pharos_proof + .account_proofs + .get(&key) + .ok_or_else(|| Error::Custom("Missing account proof".to_string()))?; + + spv::verify_proof( + &account_data.proof_nodes, + &address, + &account_data.raw_value, + &state_root.0, + ) + .map_err(|e| Error::AnyHow(anyhow::Error::from(e).into()))?; + + map.insert(key, Some(account_data.raw_value.clone())); + continue; + } else { + return 
Err(Error::Custom( + "Unsupported key type: expected length 20, 32, or 52".to_string(), + )); + }; + + let contract_address: [u8; 20] = contract_addr.0; + + let slot_key: [u8; 32] = slot_hash.clone().try_into().map_err(|e: Vec| { + Error::AnyHow( + anyhow::anyhow!("Invalid slot hash length: expected 32, got {}", e.len()).into(), + ) + })?; + + // Check if this is a non-existence proof + if let Some(non_existence) = pharos_proof.non_existence_proofs.get(slot_key.as_slice()) { + spv::verify_non_existence_proof( + &non_existence.proof_nodes, + &spv::build_storage_key(&contract_address, &slot_key), + &state_root.0, + &non_existence.sibling_proofs, + ) + .map_err(|e| Error::AnyHow(anyhow::Error::from(e).into()))?; + map.insert(key, None); + continue; + } + + // Otherwise verify existence proof + let storage_proof_nodes = pharos_proof + .storage_proof + .get(slot_key.as_slice()) + .ok_or_else(|| Error::Custom("Missing storage proof for key".to_string()))?; + + let storage_value = pharos_proof + .storage_values + .get(&slot_hash) + .ok_or_else(|| Error::Custom("Missing storage value for key".to_string()))?; + + // Pad value to 32 bytes for proof verification + let mut padded_value = [0u8; 32]; + if storage_value.len() <= 32 { + padded_value[32 - storage_value.len()..].copy_from_slice(storage_value); + } else { + return Err(Error::Custom("Storage value exceeds 32 bytes".to_string())); + } + + spv::verify_proof( + storage_proof_nodes, + &spv::build_storage_key(&contract_address, &slot_key), + &padded_value, + &state_root.0, + ) + .map_err(|e| Error::AnyHow(anyhow::Error::from(e).into()))?; + + map.insert(key, Some(storage_value.clone())); + } + + Ok(map) +} diff --git a/modules/pallets/testsuite/Cargo.toml b/modules/pallets/testsuite/Cargo.toml index bfd238473..cfa674ce1 100644 --- a/modules/pallets/testsuite/Cargo.toml +++ b/modules/pallets/testsuite/Cargo.toml @@ -52,6 +52,12 @@ hyperbridge-client-machine = {workspace = true, default-features = true } evm-state-machine = 
{ workspace = true, default-features = true } subxt-utils = { workspace = true, default-features = true } ismp-grandpa = { workspace = true, default-features = true } +ismp-pharos = { workspace = true, default-features = true } +pharos-primitives = { workspace = true, default-features = true } +pharos-prover = { workspace = true, default-features = true } +geth-primitives = { workspace = true, default-features = true } +bls = { workspace = true } +crypto-utils = { workspace = true, default-features = true } rs_merkle = { version = "1.5.0"} log = { workspace = true } primitive-types = { workspace = true } diff --git a/modules/pallets/testsuite/src/runtime.rs b/modules/pallets/testsuite/src/runtime.rs index c32f4a5c8..05fa88cb4 100644 --- a/modules/pallets/testsuite/src/runtime.rs +++ b/modules/pallets/testsuite/src/runtime.rs @@ -258,6 +258,7 @@ impl pallet_ismp::Config for Test { Test, HyperbridgeClientMachine, >, + ismp_pharos::PharosClient, ); type OffchainDB = Mmr; type FeeHandler = ( diff --git a/modules/pallets/testsuite/src/tests/ismp_pharos.rs b/modules/pallets/testsuite/src/tests/ismp_pharos.rs new file mode 100644 index 000000000..76269b0e9 --- /dev/null +++ b/modules/pallets/testsuite/src/tests/ismp_pharos.rs @@ -0,0 +1,239 @@ +// Copyright (c) 2025 Polytope Labs. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![cfg(test)] + +use crate::runtime::{Ismp, Test}; +use codec::{Decode, Encode}; +use ismp::{ + consensus::{ConsensusClient, StateMachineId}, + host::StateMachine, +}; +use ismp_pharos::{ConsensusState, PharosClient, PHAROS_CONSENSUS_CLIENT_ID}; +use pharos_primitives::{Config, Testnet, PHAROS_ATLANTIC_CHAIN_ID}; +use pharos_prover::PharosProver; +use primitive_types::H256; + +#[tokio::test] +#[ignore] +async fn test_ismp_pharos_non_epoch_boundary_consensus_verification() { + let rpc_url = + std::env::var("PHAROS_ATLANTIC_RPC").expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let prover = PharosProver::::new(&rpc_url).await.expect("Failed to create prover"); + + let latest_block_num = prover.get_latest_block().await.expect("Failed to get block number"); + println!("Latest block: {}", latest_block_num); + + let mut target_block = latest_block_num.saturating_sub(5); + + // ensuring we're not at an epoch boundary so as to avoid needing staking contract verification + while Testnet::is_epoch_boundary(target_block) { + target_block = target_block.saturating_sub(1); + } + println!("Target block: {}", target_block); + + let validator_info = + prover.rpc.get_validator_info(None).await.expect("Failed to get validator info"); + println!("Validators: {}", validator_info.validator_set.len()); + + let current_epoch = Testnet::compute_epoch(target_block); + let validator_set = prover + .build_validator_set(&validator_info.validator_set, current_epoch) + .expect("Failed to build validator set"); + println!("Total stake: {}", validator_set.total_stake); + + let initial_block = target_block - 1; + let initial_consensus_state = ConsensusState { + current_validators: validator_set, + finalized_height: initial_block, + finalized_hash: H256::zero(), + current_epoch, + chain_id: PHAROS_ATLANTIC_CHAIN_ID, + }; + + let update = prover + .fetch_block_update(target_block) + .await + .expect("Failed to fetch block update"); + println!("Block update is for block: {}", 
update.block_number()); + println!("Participant keys Length: {}", update.block_proof.participant_count()); + + let pharos_client = PharosClient::::default(); + + let host = Ismp::default(); + let result = pharos_client.verify_consensus( + &host, + PHAROS_CONSENSUS_CLIENT_ID, + initial_consensus_state.encode(), + update.encode(), + ); + + match result { + Ok((new_state_bytes, commitments)) => { + let new_state = ConsensusState::decode(&mut &new_state_bytes[..]) + .expect("Failed to decode new state"); + + println!("\nVerification Successful"); + println!("Finalized height: {}", new_state.finalized_height); + println!("Epoch: {}", new_state.current_epoch); + + // the epoch should remain the same + assert_eq!( + new_state.current_epoch, initial_consensus_state.current_epoch, + "Epoch should not change for non-epoch-boundary blocks" + ); + + let state_id = StateMachineId { + state_id: StateMachine::Evm(PHAROS_ATLANTIC_CHAIN_ID), + consensus_state_id: PHAROS_CONSENSUS_CLIENT_ID, + }; + assert!(commitments.contains_key(&state_id), "Should have state commitment"); + + let heights = &commitments[&state_id]; + assert_eq!(heights.len(), 1, "Should have exactly one state commitment"); + assert_eq!( + heights[0].height, target_block, + "Commitment height should match the target block" + ); + + assert_eq!( + new_state.finalized_height, target_block, + "Finalized height should match the target block" + ); + }, + Err(e) => { + panic!("Verification failed: {:?}", e); + }, + } +} + +#[tokio::test] +#[ignore] +async fn test_ismp_pharos_epoch_boundary_consensus_verification() { + let rpc_url = + std::env::var("PHAROS_ATLANTIC_RPC").expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let prover = PharosProver::::new(&rpc_url).await.expect("Failed to create prover"); + + let latest_block_num = prover.get_latest_block().await.expect("Failed to get block number"); + println!("Latest block: {}", latest_block_num); + + // Find the most recent epoch boundary block. 
+ let epoch_length = Testnet::EPOCH_LENGTH_BLOCKS; + let current_epoch = Testnet::compute_epoch(latest_block_num); + // last epoch boundary block is at (current_epoch * epoch_length) - 1 + let target_block = (current_epoch * epoch_length) - 1; + + assert!( + Testnet::is_epoch_boundary(target_block), + "Target block {} should be an epoch boundary", + target_block + ); + println!("Target epoch boundary block: {}", target_block); + println!( + "Current epoch: {}, target epoch: {}", + current_epoch, + Testnet::compute_epoch(target_block) + ); + + let validator_info = prover + .rpc + .get_validator_info(Some(target_block)) + .await + .expect("Failed to get validator info"); + println!("Validators: {}", validator_info.validator_set.len()); + + let target_epoch = Testnet::compute_epoch(target_block); + let validator_set = prover + .build_validator_set(&validator_info.validator_set, target_epoch) + .expect("Failed to build validator set"); + println!("Total stake: {}", validator_set.total_stake); + + // trusted consensus state at the block before the epoch boundary + let initial_block = target_block - 1; + let initial_consensus_state = ConsensusState { + current_validators: validator_set, + finalized_height: initial_block, + finalized_hash: H256::zero(), + current_epoch: target_epoch, + chain_id: PHAROS_ATLANTIC_CHAIN_ID, + }; + + // should include a validator_set_proof because it's an epoch boundary. 
+ let update = prover + .fetch_block_update(target_block) + .await + .expect("Failed to fetch block update for epoch boundary"); + println!("Block update is for block: {}", update.block_number()); + println!("Participant keys Length: {}", update.block_proof.participant_count()); + assert!( + update.validator_set_proof.is_some(), + "Epoch boundary block should include a validator set proof" + ); + + let pharos_client = PharosClient::::default(); + + let host = Ismp::default(); + let result = pharos_client.verify_consensus( + &host, + PHAROS_CONSENSUS_CLIENT_ID, + initial_consensus_state.encode(), + update.encode(), + ); + + match result { + Ok((new_state_bytes, commitments)) => { + let new_state = ConsensusState::decode(&mut &new_state_bytes[..]) + .expect("Failed to decode new state"); + + println!("\nEpoch Boundary Verification Successful"); + println!("Finalized height: {}", new_state.finalized_height); + println!("Previous epoch: {}", initial_consensus_state.current_epoch); + println!("New epoch: {}", new_state.current_epoch); + println!("New validator count: {}", new_state.current_validators.len()); + + assert_eq!( + new_state.current_epoch, + initial_consensus_state.current_epoch + 1, + "Epoch should increment by 1 at epoch boundary" + ); + + assert_eq!( + new_state.finalized_height, target_block, + "Finalized height should match the epoch boundary block" + ); + + assert!( + !new_state.current_validators.is_empty(), + "New validator set should not be empty" + ); + + let state_id = StateMachineId { + state_id: StateMachine::Evm(PHAROS_ATLANTIC_CHAIN_ID), + consensus_state_id: PHAROS_CONSENSUS_CLIENT_ID, + }; + assert!(commitments.contains_key(&state_id), "Should have state commitment"); + + let heights = &commitments[&state_id]; + assert_eq!(heights.len(), 1, "Should have exactly one state commitment"); + assert_eq!( + heights[0].height, target_block, + "Commitment height should match the epoch boundary block" + ); + }, + Err(e) => { + panic!("Epoch boundary 
verification failed: {:?}", e); + }, + } +} diff --git a/modules/pallets/testsuite/src/tests/mod.rs b/modules/pallets/testsuite/src/tests/mod.rs index aa4ac8f2a..a39238acb 100644 --- a/modules/pallets/testsuite/src/tests/mod.rs +++ b/modules/pallets/testsuite/src/tests/mod.rs @@ -10,9 +10,11 @@ mod pallet_xcm_gateway; mod xcm_integration_test; mod common; +mod ismp_pharos; mod pallet_bridge_airdrop; mod pallet_collator_manager; mod pallet_consensus_incentives; mod pallet_messaging_fees; mod pallet_token_gateway; +mod pharos_state_machine; mod substrate_evm_state_machine; diff --git a/modules/pallets/testsuite/src/tests/pallet_collator_manager.rs b/modules/pallets/testsuite/src/tests/pallet_collator_manager.rs index 6856feb76..6ba1150d0 100644 --- a/modules/pallets/testsuite/src/tests/pallet_collator_manager.rs +++ b/modules/pallets/testsuite/src/tests/pallet_collator_manager.rs @@ -104,7 +104,6 @@ fn test_new_collators_are_selected_based_on_reputation() { assert_ok!(CollatorManager::reserve(&charlie_stash, 100 * UNIT)); - assert_ok!(CollatorManager::reserve(&dave_stash, 100 * UNIT)); set_reputation_balance(&CHARLIE, 20 * UNIT); diff --git a/modules/pallets/testsuite/src/tests/pharos_state_machine.rs b/modules/pallets/testsuite/src/tests/pharos_state_machine.rs new file mode 100644 index 000000000..61054608c --- /dev/null +++ b/modules/pallets/testsuite/src/tests/pharos_state_machine.rs @@ -0,0 +1,375 @@ +// Copyright (c) 2025 Polytope Labs. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(test)] + +use pharos_primitives::{spv, Config, Testnet, STAKING_CONTRACT_ADDRESS}; +use pharos_prover::{ + rpc::{hex_to_bytes, PharosRpcClient}, + rpc_to_proof_nodes, rpc_to_sibling_proofs, PharosProver, +}; +use primitive_types::{H160, H256, U256}; +use std::sync::Arc; + +#[tokio::test] +#[ignore] +async fn test_pharos_account_proof_verification() { + let rpc_url = + std::env::var("PHAROS_ATLANTIC_RPC").expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let rpc = PharosRpcClient::new(&rpc_url).expect("Failed to create RPC client"); + + let block_number = rpc.get_block_number().await.expect("Failed to get block number"); + let target_block = block_number.saturating_sub(5); + println!("Testing at block: {}", target_block); + + let header = rpc.get_block_by_number(target_block).await.expect("Failed to get block"); + let state_root = header.state_root; + println!("State root: {:?}", state_root); + + let address = H160::from_slice(STAKING_CONTRACT_ADDRESS.as_slice()); + let total_stake_slot = H256(U256::from(6u64).to_big_endian()); + let proof = rpc + .get_proof(address, vec![total_stake_slot], target_block) + .await + .expect("Failed to get proof"); + + println!("Account proof nodes: {}", proof.account_proof.len()); + println!("Storage hash: {}", proof.storage_hash); + println!("Raw value length: {}", proof.raw_value.len()); + + let account_proof_nodes = + rpc_to_proof_nodes(&proof.account_proof).expect("Failed to convert account proof nodes"); + let raw_value = hex_to_bytes(&proof.raw_value).expect("Failed to parse raw_value"); + + assert!(!account_proof_nodes.is_empty(), "Account proof should not be empty"); + assert!(!raw_value.is_empty(), "Raw account value should not be empty"); + + let address_bytes: [u8; 20] = address.0; + spv::verify_proof(&account_proof_nodes, &address_bytes, &raw_value, &state_root.0) + .expect("Account proof 
verification should pass for staking contract"); + println!("Account proof verification: PASSED"); +} + +#[tokio::test] +#[ignore] +async fn test_pharos_storage_proof_verification() { + let rpc_url = + std::env::var("PHAROS_ATLANTIC_RPC").expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let rpc = PharosRpcClient::new(&rpc_url).expect("Failed to create RPC client"); + + let block_number = rpc.get_block_number().await.expect("Failed to get block number"); + let target_block = block_number.saturating_sub(5); + println!("Testing at block: {}", target_block); + + let header = rpc.get_block_by_number(target_block).await.expect("Failed to get block"); + let state_root = header.state_root; + println!("State root: {:?}", state_root); + + let address = H160::from_slice(STAKING_CONTRACT_ADDRESS.as_slice()); + let total_stake_slot = H256(U256::from(6u64).to_big_endian()); + let proof = rpc + .get_proof(address, vec![total_stake_slot], target_block) + .await + .expect("Failed to get proof"); + + println!("Account proof nodes: {}", proof.account_proof.len()); + println!("Storage proof entries: {}", proof.storage_proof.len()); + println!("Storage hash: {}", proof.storage_hash); + + let account_proof_nodes = + rpc_to_proof_nodes(&proof.account_proof).expect("Failed to convert account proof nodes"); + let raw_value = hex_to_bytes(&proof.raw_value).expect("Failed to parse raw_value"); + let address_bytes: [u8; 20] = address.0; + + spv::verify_proof(&account_proof_nodes, &address_bytes, &raw_value, &state_root.0) + .expect("Account proof verification should pass"); + println!("Account proof verification: PASSED"); + + assert!(!proof.storage_proof.is_empty(), "Should have at least one storage proof"); + let storage_entry = &proof.storage_proof[0]; + let storage_proof_nodes = + rpc_to_proof_nodes(&storage_entry.proof).expect("Failed to convert storage proof nodes"); + + println!("Storage key: {}", storage_entry.key); + println!("Storage value: {}", storage_entry.value); + 
println!("Storage proof nodes: {}", storage_proof_nodes.len()); + + assert!(!storage_proof_nodes.is_empty(), "Storage proof should not be empty"); + + let value_bytes = hex_to_bytes(&storage_entry.value).expect("Failed to parse storage value"); + let mut padded_value = [0u8; 32]; + if value_bytes.len() <= 32 { + padded_value[32 - value_bytes.len()..].copy_from_slice(&value_bytes); + } + + let total_stake = U256::from_big_endian(&padded_value); + println!("Total stake: {}", total_stake); + assert!(total_stake > U256::zero(), "Total stake should be non-zero"); + + let key_bytes = hex_to_bytes(&storage_entry.key).expect("Failed to parse storage key"); + let mut storage_key = [0u8; 32]; + if key_bytes.len() <= 32 { + storage_key[32 - key_bytes.len()..].copy_from_slice(&key_bytes); + } + + // Pharos uses a flat trie — storage proofs verify directly against state_root. + spv::verify_proof( + &storage_proof_nodes, + &spv::build_storage_key(&address_bytes, &storage_key), + &padded_value, + &state_root.0, + ) + .expect("Storage proof verification should pass for totalStake"); + println!("Storage proof verification: PASSED"); +} + +#[tokio::test] +#[ignore] +async fn test_pharos_multiple_storage_proofs() { + let rpc_url = + std::env::var("PHAROS_ATLANTIC_RPC").expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let rpc = PharosRpcClient::new(&rpc_url).expect("Failed to create RPC client"); + + let block_number = rpc.get_block_number().await.expect("Failed to get block number"); + let target_block = block_number.saturating_sub(5); + println!("Testing at block: {}", target_block); + + let header = rpc.get_block_by_number(target_block).await.expect("Failed to get block"); + let state_root = header.state_root; + + let address = H160::from_slice(STAKING_CONTRACT_ADDRESS.as_slice()); + let total_stake_slot = H256(U256::from(6u64).to_big_endian()); + let epoch_length_slot = H256(U256::from(5u64).to_big_endian()); + + let proof_stake = rpc + .get_proof(address, 
vec![total_stake_slot], target_block) + .await + .expect("Failed to get proof for totalStake"); + + let proof_epoch = rpc + .get_proof(address, vec![epoch_length_slot], target_block) + .await + .expect("Failed to get proof for epochLength"); + + let account_proof_nodes = rpc_to_proof_nodes(&proof_stake.account_proof) + .expect("Failed to convert account proof nodes"); + let raw_value = hex_to_bytes(&proof_stake.raw_value).expect("Failed to parse raw_value"); + let address_bytes: [u8; 20] = address.0; + + spv::verify_proof(&account_proof_nodes, &address_bytes, &raw_value, &state_root.0) + .expect("Account proof verification should pass"); + println!("Account proof verification: PASSED"); + + // Pharos uses a flat trie — storage proofs verify directly against state_root. + assert!(!proof_stake.storage_proof.is_empty(), "Should have storage proof for totalStake"); + let stake_entry = &proof_stake.storage_proof[0]; + let stake_proof_nodes = + rpc_to_proof_nodes(&stake_entry.proof).expect("Failed to convert storage proof nodes"); + + let stake_value_bytes = + hex_to_bytes(&stake_entry.value).expect("Failed to parse totalStake value"); + let mut stake_padded = [0u8; 32]; + if stake_value_bytes.len() <= 32 { + stake_padded[32 - stake_value_bytes.len()..].copy_from_slice(&stake_value_bytes); + } + + let stake_key_bytes = hex_to_bytes(&stake_entry.key).expect("Failed to parse storage key"); + let mut stake_key = [0u8; 32]; + if stake_key_bytes.len() <= 32 { + stake_key[32 - stake_key_bytes.len()..].copy_from_slice(&stake_key_bytes); + } + + let total_stake = U256::from_big_endian(&stake_padded); + println!("Storage proof [totalStake]: key={}, value={}", stake_entry.key, total_stake); + assert!(total_stake > U256::zero(), "Total stake should be non-zero"); + + spv::verify_proof( + &stake_proof_nodes, + &spv::build_storage_key(&address_bytes, &stake_key), + &stake_padded, + &state_root.0, + ) + .expect("Storage proof for totalStake should pass"); + println!("Storage proof 
[totalStake] verification: PASSED"); + + assert!(!proof_epoch.storage_proof.is_empty(), "Should have storage proof for epochLength"); + let epoch_entry = &proof_epoch.storage_proof[0]; + let epoch_proof_nodes = + rpc_to_proof_nodes(&epoch_entry.proof).expect("Failed to convert storage proof nodes"); + + let epoch_value_bytes = + hex_to_bytes(&epoch_entry.value).expect("Failed to parse epochLength value"); + let mut epoch_padded = [0u8; 32]; + if epoch_value_bytes.len() <= 32 { + epoch_padded[32 - epoch_value_bytes.len()..].copy_from_slice(&epoch_value_bytes); + } + + let epoch_key_bytes = hex_to_bytes(&epoch_entry.key).expect("Failed to parse storage key"); + let mut epoch_key = [0u8; 32]; + if epoch_key_bytes.len() <= 32 { + epoch_key[32 - epoch_key_bytes.len()..].copy_from_slice(&epoch_key_bytes); + } + + let epoch_length = U256::from_big_endian(&epoch_padded); + println!("Storage proof [epochLength]: key={}, value={}", epoch_entry.key, epoch_length); + assert!(epoch_length > U256::zero(), "Epoch length should be non-zero"); + + spv::verify_proof( + &epoch_proof_nodes, + &spv::build_storage_key(&address_bytes, &epoch_key), + &epoch_padded, + &state_root.0, + ) + .expect("Storage proof for epochLength should pass"); + println!("Storage proof [epochLength] verification: PASSED"); +} + +#[tokio::test] +#[ignore] +async fn test_pharos_non_existence_account_proof() { + let rpc_url = + std::env::var("PHAROS_ATLANTIC_RPC").expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let rpc = PharosRpcClient::new(&rpc_url).expect("Failed to create RPC client"); + + let block_number = rpc.get_block_number().await.expect("Failed to get block number"); + let target_block = block_number.saturating_sub(5); + println!("Testing at block: {}", target_block); + + let header = rpc.get_block_by_number(target_block).await.expect("Failed to get block"); + let state_root = header.state_root; + + // Query a non-existent account + let fake_address = + H160::from_slice(&[0xde, 0xad, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + let dummy_slot = H256::zero(); + let proof = rpc + .get_proof(fake_address, vec![dummy_slot], target_block) + .await + .expect("Failed to get proof"); + + assert!(!proof.is_exist, "Account should not exist"); + + let proof_nodes = + rpc_to_proof_nodes(&proof.account_proof).expect("Failed to convert proof nodes"); + let sibling_proofs = rpc_to_sibling_proofs(&proof.sibling_leftmost_leaf_proofs) + .expect("Failed to convert sibling proofs"); + + let address_bytes: [u8; 20] = fake_address.0; + spv::verify_non_existence_proof(&proof_nodes, &address_bytes, &state_root.0, &sibling_proofs) + .expect("Non-existence proof should be valid for fake account"); + println!("Non-existence account proof: PASSED"); + + // Sanity check: the same proof must NOT pass as an existence proof + assert!( + spv::verify_membership_proof(&proof_nodes, &address_bytes, &state_root.0).is_err(), + "Membership check should fail for non-existent account" + ); + println!("Membership returns None as expected: PASSED"); +} + +#[tokio::test] +#[ignore] +async fn test_pharos_non_existence_storage_proof() { + let rpc_url = + std::env::var("PHAROS_ATLANTIC_RPC").expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let rpc = PharosRpcClient::new(&rpc_url).expect("Failed to create RPC client"); + + let block_number = rpc.get_block_number().await.expect("Failed to get block number"); + let target_block = block_number.saturating_sub(5); + println!("Testing at block: {}", target_block); + + let header = rpc.get_block_by_number(target_block).await.expect("Failed to get block"); + let state_root = header.state_root; + + // Use the real staking contract but query a non-existent storage slot + let address = H160::from_slice(STAKING_CONTRACT_ADDRESS.as_slice()); + let fake_slot = H256::from_low_u64_be(999999); + let proof = rpc + .get_proof(address, vec![fake_slot], target_block) + .await + .expect("Failed to get proof"); + + 
assert!(!proof.storage_proof.is_empty(), "Should have a storage proof entry"); + let storage_entry = &proof.storage_proof[0]; + println!("Storage isExist: {}", storage_entry.is_exist); + println!("Storage proof nodes: {}", storage_entry.proof.len()); + println!("Storage sibling proofs: {}", storage_entry.sibling_leftmost_leaf_proofs.len()); + + if !storage_entry.is_exist { + let proof_nodes = rpc_to_proof_nodes(&storage_entry.proof) + .expect("Failed to convert storage proof nodes"); + let sibling_proofs = rpc_to_sibling_proofs(&storage_entry.sibling_leftmost_leaf_proofs) + .expect("Failed to convert sibling proofs"); + + let address_bytes: [u8; 20] = address.0; + let mut slot_key = [0u8; 32]; + slot_key.copy_from_slice(fake_slot.as_bytes()); + + spv::verify_non_existence_proof( + &proof_nodes, + &spv::build_storage_key(&address_bytes, &slot_key), + &state_root.0, + &sibling_proofs, + ) + .expect("Storage non-existence proof should be valid for fake slot"); + println!("Non-existence storage proof: PASSED"); + } else { + // Slot 999999 might actually exist if so, just verify the existence proof works + println!("Storage slot exists (unexpected), skipping non-existence test"); + } +} + +#[tokio::test] +#[ignore] +async fn test_pharos_account_proof_with_raw_value() { + let rpc_url = + std::env::var("PHAROS_ATLANTIC_RPC").expect("PHAROS_ATLANTIC_RPC env variable must be set"); + let rpc = PharosRpcClient::new(&rpc_url).expect("Failed to create RPC client"); + + let block_number = rpc.get_block_number().await.expect("Failed to get block number"); + let target_block = block_number.saturating_sub(5); + println!("Testing at block: {}", target_block); + + let header = rpc.get_block_by_number(target_block).await.expect("Failed to get block"); + let state_root = header.state_root; + + // Fetch account proof for staking contract with no storage keys + let address = H160::from_slice(STAKING_CONTRACT_ADDRESS.as_slice()); + let proof = rpc.get_proof(address, vec![], 
target_block).await.expect("Failed to get proof"); + + assert!(proof.is_exist, "Staking contract should exist"); + + let proof_nodes = + rpc_to_proof_nodes(&proof.account_proof).expect("Failed to convert proof nodes"); + let raw_value = hex_to_bytes(&proof.raw_value).expect("Failed to parse raw_value"); + + assert!(!raw_value.is_empty(), "Account raw value should not be empty"); + + let address_bytes: [u8; 20] = address.0; + spv::verify_proof(&proof_nodes, &address_bytes, &raw_value, &state_root.0) + .expect("Account proof should verify against state root"); + println!("Account proof with raw value: PASSED"); + + // Verify a non-existent account returns isExist: false + let fake_address = + H160::from_slice(&[0xde, 0xad, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]); + let fake_proof = rpc + .get_proof(fake_address, vec![], target_block) + .await + .expect("Failed to get proof for fake address"); + + assert!(!fake_proof.is_exist, "Fake account should not exist"); + println!("Non-existent account isExist=false: PASSED"); +} diff --git a/modules/utils/crypto/Cargo.toml b/modules/utils/crypto/Cargo.toml index 565af5382..db32aa43a 100644 --- a/modules/utils/crypto/Cargo.toml +++ b/modules/utils/crypto/Cargo.toml @@ -16,6 +16,11 @@ anyhow = { workspace = true } scale-info = { workspace = true } sp-core = { workspace = true, default-features = false } sp-io = { workspace = true, default-features = false } +hex = { version = "0.4", default-features = false, features = ["alloc"] } +serde = { workspace = true, optional = true } +ssz-rs = { git = "https://github.com/polytope-labs/ssz-rs", branch = "main", default-features = false } +serde-hex-utils = { workspace = true, default-features = false } +bls = { workspace = true, default-features = false } [features] default = ["std"] @@ -25,4 +30,11 @@ std = [ "scale-info/std", "sp-core/std", "sp-io/std", + "hex/std", + "serde", + "serde/std", + "ssz-rs/default", + "ssz-rs/serde", + "serde-hex-utils/std", 
+ "bls/std", ] diff --git a/modules/utils/crypto/src/bls.rs b/modules/utils/crypto/src/bls.rs new file mode 100644 index 000000000..aec3f5484 --- /dev/null +++ b/modules/utils/crypto/src/bls.rs @@ -0,0 +1,48 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! BLS12-381 cryptographic type definitions and utilities. + +use crate::ssz::ByteVector; +use alloc::vec::Vec; +use bls::{errors::BLSError, types::G1ProjectivePoint}; + +/// Length of a BLS12-381 public key in bytes (compressed G1 point). +pub const BLS_PUBLIC_KEY_BYTES_LEN: usize = 48; + +/// Length of a BLS12-381 signature in bytes (compressed G2 point). +pub const BLS_SIGNATURE_BYTES_LEN: usize = 96; + +/// A BLS12-381 public key (48 bytes compressed). +pub type BlsPublicKey = ByteVector; + +/// A BLS12-381 signature (96 bytes compressed). +pub type BlsSignature = ByteVector; + +/// Convert a compressed BLS public key to a projective point. +pub fn pubkey_to_projective(compressed_key: &BlsPublicKey) -> Result { + let affine_point = bls::pubkey_to_point(&compressed_key.to_vec())?; + Ok(affine_point.into()) +} + +/// Aggregate multiple BLS public keys into a single public key. 
+pub fn aggregate_public_keys(keys: &[BlsPublicKey]) -> Vec { + let aggregate = keys + .iter() + .filter_map(|key| pubkey_to_projective(key).ok()) + .fold(G1ProjectivePoint::default(), |acc, next| acc + next); + + bls::point_to_pubkey(aggregate.into()) +} diff --git a/modules/utils/crypto/src/lib.rs b/modules/utils/crypto/src/lib.rs index c2d05f546..8d05a608c 100644 --- a/modules/utils/crypto/src/lib.rs +++ b/modules/utils/crypto/src/lib.rs @@ -16,4 +16,12 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; +pub mod bls; +pub mod ssz; pub mod verification; + +pub use bls::{ + aggregate_public_keys, pubkey_to_projective, BlsPublicKey, BlsSignature, + BLS_PUBLIC_KEY_BYTES_LEN, BLS_SIGNATURE_BYTES_LEN, +}; +pub use ssz::ByteVector; diff --git a/modules/consensus/sync-committee/primitives/src/ssz/byte_vector.rs b/modules/utils/crypto/src/ssz/byte_vector.rs similarity index 67% rename from modules/consensus/sync-committee/primitives/src/ssz/byte_vector.rs rename to modules/utils/crypto/src/ssz/byte_vector.rs index 56903d721..5a2edf20e 100644 --- a/modules/consensus/sync-committee/primitives/src/ssz/byte_vector.rs +++ b/modules/utils/crypto/src/ssz/byte_vector.rs @@ -1,6 +1,22 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ use super::write_bytes_to_lower_hex; use alloc::{vec, vec::Vec}; use core::{ + cmp::Ordering, fmt, hash::{Hash, Hasher}, ops::{Deref, DerefMut}, @@ -36,6 +52,18 @@ impl PartialEq for ByteVector { } } +impl PartialOrd for ByteVector { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for ByteVector { + fn cmp(&self, other: &Self) -> Ordering { + self.as_ref().cmp(other.as_ref()) + } +} + impl Hash for ByteVector { fn hash(&self, state: &mut H) { self.as_ref().hash(state); diff --git a/modules/utils/crypto/src/ssz/mod.rs b/modules/utils/crypto/src/ssz/mod.rs new file mode 100644 index 000000000..20e02f7b6 --- /dev/null +++ b/modules/utils/crypto/src/ssz/mod.rs @@ -0,0 +1,29 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod byte_vector; +use core::fmt; + +fn write_bytes_to_lower_hex>(f: &mut fmt::Formatter<'_>, data: T) -> fmt::Result { + if f.alternate() { + write!(f, "0x")?; + } + for i in data.as_ref() { + write!(f, "{i:02x}")?; + } + Ok(()) +} + +pub use byte_vector::ByteVector; diff --git a/parachain/runtimes/gargantua/Cargo.toml b/parachain/runtimes/gargantua/Cargo.toml index 94f7e3705..a174cb296 100644 --- a/parachain/runtimes/gargantua/Cargo.toml +++ b/parachain/runtimes/gargantua/Cargo.toml @@ -50,6 +50,7 @@ pallet-bridge-airdrop = { workspace = true } ismp-arbitrum = { workspace = true } ismp-optimism = { workspace = true } ismp-polygon = { workspace = true } +ismp-pharos = { workspace = true } ismp-tendermint = { workspace = true } pallet-messaging-fees = { workspace = true } evm-state-machine = { workspace = true } @@ -156,7 +157,8 @@ std = [ "ismp-tendermint/std", "pallet-messaging-fees/std", "evm-state-machine/std", - "substrate-state-machine/std" + "substrate-state-machine/std", + "ismp-pharos/std" ] runtime-benchmarks = [ "hex-literal", diff --git a/parachain/runtimes/gargantua/src/ismp.rs b/parachain/runtimes/gargantua/src/ismp.rs index aebc07f77..dbe85fe26 100644 --- a/parachain/runtimes/gargantua/src/ismp.rs +++ b/parachain/runtimes/gargantua/src/ismp.rs @@ -163,6 +163,7 @@ impl pallet_ismp::Config for Runtime { ismp_optimism::OptimismConsensusClient, ismp_polygon::PolygonClient, ismp_tendermint::TendermintClient, + ismp_pharos::PharosClient, ); type OffchainDB = Mmr; type FeeHandler = pallet_ismp::fee_handler::WeightFeeHandler< diff --git a/tesseract/consensus/integration-tests/Cargo.toml b/tesseract/consensus/integration-tests/Cargo.toml index dac74cd0c..858523e89 100644 --- a/tesseract/consensus/integration-tests/Cargo.toml +++ b/tesseract/consensus/integration-tests/Cargo.toml @@ -51,6 +51,9 @@ substrate-state-machine = { workspace = true } arb-host = { workspace = true } op-host = { workspace = true } grandpa-verifier-primitives = { workspace = true } 
+tesseract-pharos = { workspace = true } +ismp-pharos = { workspace = true } +pharos-primitives = { workspace = true } [dependencies.polkadot-sdk] workspace = true diff --git a/tesseract/consensus/integration-tests/src/lib.rs b/tesseract/consensus/integration-tests/src/lib.rs index 80449684a..20442adff 100644 --- a/tesseract/consensus/integration-tests/src/lib.rs +++ b/tesseract/consensus/integration-tests/src/lib.rs @@ -8,6 +8,7 @@ mod ping; // mod substrate; //mod l2s; mod util; +//mod pharos; // use std::{ // sync::Arc, diff --git a/tesseract/consensus/integration-tests/src/pharos.rs b/tesseract/consensus/integration-tests/src/pharos.rs new file mode 100644 index 000000000..17dbc9ad1 --- /dev/null +++ b/tesseract/consensus/integration-tests/src/pharos.rs @@ -0,0 +1,94 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use codec::Encode; +use ismp::{host::StateMachine, messaging::CreateConsensusState}; +use ismp_pharos::PHAROS_CONSENSUS_CLIENT_ID; +use std::sync::Arc; +use substrate_state_machine::HashAlgorithm; +use subxt_utils::Hyperbridge; +use tesseract_evm::EvmConfig; +use pharos_primitives::Config; +use tesseract_pharos::{PharosHost, PharosHostConfig, Testnet}; +use tesseract_primitives::IsmpHost; +use tesseract_substrate::{SubstrateClient, SubstrateConfig}; + +use crate::util::setup_logging; + +#[tokio::test] +async fn pharos_consensus_updates() -> anyhow::Result<()> { + setup_logging(); + dotenv::dotenv().ok(); + + let pharos_rpc_url = + std::env::var("PHAROS_RPC_URL").expect("PHAROS_RPC_URL must be set"); + + let evm_config = EvmConfig { + rpc_urls: vec![pharos_rpc_url.clone()], + state_machine: StateMachine::Evm(688689), + consensus_state_id: "PHAR".to_string(), + ismp_host: Default::default(), + signer: "2e0834786285daccd064ca17f1654f67b4aef298acbb82cef9ec422fb4975622".to_string(), + tracing_batch_size: None, + query_batch_size: None, + poll_interval: None, + gas_price_buffer: None, + client_type: Default::default(), + initial_height: None, + transport: Default::default(), + }; + + let host_config = PharosHostConfig { + consensus_update_frequency: Some(300), + }; + + let pharos_host = PharosHost::::new(&host_config, &evm_config).await?; + + let config_a = SubstrateConfig { + state_machine: StateMachine::Kusama(2000), + hashing: Some(HashAlgorithm::Keccak), + consensus_state_id: Some("PARA".to_string()), + rpc_ws: "ws://localhost:9990".to_string(), + max_rpc_payload_size: None, + signer: "0xe5be9a5092b81bca64be81d212e7f2f9eba183bb7a90954f7b76361f6edb5c0a".to_string(), + initial_height: None, + max_concurent_queries: None, + poll_interval: None, + fee_token_decimals: None, + }; + let chain_a = SubstrateClient::::new(config_a).await?; + + println!("getting initial consensus state"); + let initial_consensus_state = pharos_host.get_consensus_state().await?; + + 
println!("creating initial consensus state"); + + chain_a + .create_consensus_state(CreateConsensusState { + consensus_state: initial_consensus_state.encode(), + consensus_client_id: PHAROS_CONSENSUS_CLIENT_ID, + consensus_state_id: *b"PHAR", + unbonding_period: Testnet::UNBONDING_PERIOD, + challenge_periods: vec![(StateMachine::Evm(688689), 5 * 60)].into_iter().collect(), + state_machine_commitments: vec![], + }) + .await?; + + println!("created consensus state"); + + pharos_host.start_consensus(Arc::new(chain_a)).await?; + + Ok(()) +} diff --git a/tesseract/consensus/op-host/src/host.rs b/tesseract/consensus/op-host/src/host.rs index 7d4b90580..f10d69ca2 100644 --- a/tesseract/consensus/op-host/src/host.rs +++ b/tesseract/consensus/op-host/src/host.rs @@ -14,7 +14,7 @@ use ismp_optimism::{ ConsensusState, OptimismConsensusProof, OptimismConsensusType, OptimismUpdate, OPTIMISM_CONSENSUS_CLIENT_ID, }; -use op_verifier::{calculate_output_root, CANNON, _PERMISSIONED}; +use op_verifier::{calculate_output_root, _PERMISSIONED, CANNON}; use reqwest::Url; use sp_core::{bytes::from_hex, Encode, H160, H256, U256}; use sync_committee_primitives::consensus_types::{BeaconBlockHeader, Checkpoint}; diff --git a/tesseract/consensus/pharos/Cargo.toml b/tesseract/consensus/pharos/Cargo.toml new file mode 100644 index 000000000..e89de8b13 --- /dev/null +++ b/tesseract/consensus/pharos/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "tesseract-pharos" +version = "0.1.0" +edition = "2021" +description = "Pharos consensus relayer for tesseract" +authors = ["Polytope Labs "] + +[dependencies] +pharos-prover = { path = "../../../modules/consensus/pharos/prover" } +pharos-primitives = { path = "../../../modules/consensus/pharos/primitives" } +pharos-verifier = { path = "../../../modules/consensus/pharos/verifier" } +ismp-pharos = { path = "../../../modules/ismp/clients/pharos" } +geth-primitives = { workspace = true } +ismp = { workspace = true } + +serde = { version = "1.0", features = 
["derive"] } +serde_json = { package = "serde_json", version = "1.0" } +log = { workspace = true, default-features = true } +anyhow = { workspace = true, default-features = true } +codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +async-trait = "0.1.88" +tokio = { version = "1.27.0", features = ["rt-multi-thread", "macros"] } +primitive-types = { workspace = true } +sp-core = { workspace = true } + +tesseract-primitives = { workspace = true } +tesseract-evm = { workspace = true } diff --git a/tesseract/consensus/pharos/src/lib.rs b/tesseract/consensus/pharos/src/lib.rs new file mode 100644 index 000000000..f30802d04 --- /dev/null +++ b/tesseract/consensus/pharos/src/lib.rs @@ -0,0 +1,268 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tesseract consensus relayer for Pharos Network. 
+ +use anyhow::Result; +use codec::Encode; +use ismp::{ + consensus::{ConsensusStateId, StateCommitment}, + host::StateMachine, + messaging::{ConsensusMessage, CreateConsensusState, Message, StateCommitmentHeight}, +}; +use ismp_pharos::{ConsensusState, PHAROS_CONSENSUS_CLIENT_ID}; +use pharos_primitives::Config; +use pharos_prover::PharosProver; +use serde::{Deserialize, Serialize}; +use std::{marker::PhantomData, sync::Arc, time::Duration}; +use tesseract_evm::{EvmClient, EvmConfig}; +use tesseract_primitives::{IsmpHost, IsmpProvider}; + +mod notification; + +pub use pharos_primitives::{Mainnet, Testnet}; + +/// Host configuration for Pharos relayer +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PharosHostConfig { + /// Frequency (in seconds) to check for new updates + pub consensus_update_frequency: Option, +} + +/// Top-level config for Pharos relayer +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PharosConfig { + /// Host configuration options + pub host: PharosHostConfig, + /// General EVM config + #[serde(flatten)] + pub evm_config: EvmConfig, +} + +impl PharosConfig { + /// Convert the config into a client. 
+ pub async fn into_client(self) -> anyhow::Result> { + Ok(Arc::new(PharosHost::::new(&self.host, &self.evm_config).await?)) + } + + pub fn state_machine(&self) -> StateMachine { + self.evm_config.state_machine + } +} + +/// The relayer host for Pharos +#[derive(Clone)] +pub struct PharosHost { + /// Consensus state id on counterparty chain + pub consensus_state_id: ConsensusStateId, + /// State machine Identifier for this chain + pub state_machine: StateMachine, + /// Host config options + pub host: PharosHostConfig, + /// Ismp provider + pub provider: Arc, + /// Pharos prover for fetching proofs + pub prover: PharosProver, + /// Phantom data for config + _config: PhantomData, +} + +impl PharosHost { + /// Create a new PharosHost + pub async fn new(host: &PharosHostConfig, evm: &EvmConfig) -> Result { + let ismp_provider = EvmClient::new(evm.clone()).await?; + let rpc_url = evm + .rpc_urls + .first() + .ok_or_else(|| anyhow::anyhow!("No RPC URL configured in EVM config"))?; + let prover = PharosProver::new(rpc_url).await?; + + Ok(Self { + consensus_state_id: { + let mut consensus_state_id: ConsensusStateId = Default::default(); + consensus_state_id.copy_from_slice(evm.consensus_state_id.as_bytes()); + consensus_state_id + }, + state_machine: evm.state_machine, + host: host.clone(), + provider: Arc::new(ismp_provider), + prover, + _config: PhantomData, + }) + } + + /// Fetch the current consensus state (for initial state creation) + pub async fn get_consensus_state(&self) -> Result { + let latest_block = self.prover.get_latest_block().await?; + let update = self.prover.fetch_block_update(latest_block).await?; + + let header = &update.header; + let header_hash = geth_primitives::Header::from(header).hash::(); + + let current_epoch = C::compute_epoch(latest_block); + + // try to get validator set for an epoch boundary + // else, we query the previous epoch boundary + let validator_set = if let Some(ref proof) = update.validator_set_proof { + 
pharos_verifier::state_proof::verify_validator_set_proof::( + header.state_root, + proof, + current_epoch + 1, + )? + } else { + // For initial state, fetch from an epoch boundary block + let epoch_start = current_epoch * self.prover.epoch_length; + let epoch_boundary = if epoch_start > 0 { epoch_start - 1 } else { 0 }; + let boundary_update = self.prover.fetch_block_update(epoch_boundary).await?; + + if let Some(ref proof) = boundary_update.validator_set_proof { + pharos_verifier::state_proof::verify_validator_set_proof::( + boundary_update.header.state_root, + proof, + current_epoch, + )? + } else { + return Err(anyhow::anyhow!("Cannot get initial validator set")); + } + }; + + let chain_id = match self.state_machine { + StateMachine::Evm(chain_id) => chain_id, + _ => return Err(anyhow::anyhow!("Unsupported state machine")), + }; + + Ok(ConsensusState { + current_validators: validator_set, + finalized_height: latest_block, + finalized_hash: header_hash, + current_epoch, + chain_id, + }) + } +} + +/// Keccak256 hasher implementation +pub struct KeccakHasher; + +impl ismp::messaging::Keccak256 for KeccakHasher { + fn keccak256(bytes: &[u8]) -> primitive_types::H256 + where + Self: Sized, + { + sp_core::keccak_256(bytes).into() + } +} + +#[async_trait::async_trait] +impl IsmpHost for PharosHost { + async fn start_consensus( + &self, + counterparty: Arc, + ) -> Result<(), anyhow::Error> { + use crate::notification::consensus_notification; + + let interval = tokio::time::interval(Duration::from_secs( + self.host.consensus_update_frequency.unwrap_or(300), + )); + + let client = self.clone(); + let counterparty_clone = counterparty.clone(); + let mut interval = Box::pin(interval); + let provider = self.provider(); + + loop { + interval.as_mut().tick().await; + + match consensus_notification(&client, counterparty_clone.clone()).await { + Ok(Some(update)) => { + let consensus_message = ConsensusMessage { + consensus_proof: update.encode(), + consensus_state_id: 
client.consensus_state_id, + signer: counterparty.address(), + }; + + log::info!( + target: "tesseract", + "Transmitting consensus message from {} to {}", + provider.name(), + counterparty.name() + ); + + let res = counterparty + .submit( + vec![Message::Consensus(consensus_message)], + counterparty.state_machine_id().state_id, + ) + .await; + + if let Err(err) = res { + log::error!( + "Failed to submit transaction to {}: {err:?}", + counterparty.name() + ) + } + }, + Ok(None) => { + // No update to send, just continue + }, + Err(e) => { + log::error!( + target: "tesseract", + "Consensus task {}->{} encountered an error: {e:?}", + provider.name(), + counterparty.name() + ) + }, + } + } + } + + async fn query_initial_consensus_state( + &self, + ) -> Result, anyhow::Error> { + let initial_consensus_state = self.get_consensus_state().await.map_err(|e| { + anyhow::anyhow!("PharosHost: fetch initial consensus state failed: {e}") + })?; + + let latest_block = self.prover.get_latest_block().await?; + let update = self.prover.fetch_block_update(latest_block).await?; + + Ok(Some(CreateConsensusState { + consensus_state: initial_consensus_state.encode(), + consensus_client_id: PHAROS_CONSENSUS_CLIENT_ID, + consensus_state_id: self.consensus_state_id, + unbonding_period: C::UNBONDING_PERIOD, + challenge_periods: vec![(self.state_machine, 5 * 60)].into_iter().collect(), + state_machine_commitments: vec![( + ismp::consensus::StateMachineId { + state_id: self.state_machine, + consensus_state_id: self.consensus_state_id, + }, + StateCommitmentHeight { + commitment: StateCommitment { + timestamp: update.header.timestamp, + overlay_root: None, + state_root: update.header.state_root, + }, + height: latest_block, + }, + )], + })) + } + + fn provider(&self) -> Arc { + self.provider.clone() + } +} diff --git a/tesseract/consensus/pharos/src/notification.rs b/tesseract/consensus/pharos/src/notification.rs new file mode 100644 index 000000000..39e74b8d4 --- /dev/null +++ 
b/tesseract/consensus/pharos/src/notification.rs @@ -0,0 +1,90 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Consensus notification logic for Pharos relayer. + +use crate::PharosHost; +use codec::Decode; +use ismp_pharos::ConsensusState; +use pharos_primitives::{Config, VerifierStateUpdate}; +use std::sync::Arc; +use tesseract_primitives::IsmpProvider; + +pub async fn consensus_notification( + client: &PharosHost, + counterparty: Arc, +) -> Result, anyhow::Error> { + let counterparty_finalized = counterparty.query_finalized_height().await?; + let consensus_state_bytes = counterparty + .query_consensus_state(Some(counterparty_finalized), client.consensus_state_id) + .await?; + + let consensus_state = ConsensusState::decode(&mut &consensus_state_bytes[..]) + .map_err(|e| anyhow::anyhow!("Failed to decode consensus state: {:?}", e))?; + + let latest_block = client.prover.get_latest_block().await?; + + if latest_block <= consensus_state.finalized_height { + log::trace!( + target: "tesseract-pharos", + "No new blocks to sync. Latest: {}, Finalized: {}", + latest_block, + consensus_state.finalized_height + ); + return Ok(None); + } + + let current_epoch = consensus_state.current_epoch; + let latest_epoch = C::compute_epoch(latest_block); + + log::trace!( + target: "tesseract-pharos", + "New block available. 
Latest: {} (epoch {}), Finalized: {} (epoch {})", + latest_block, + latest_epoch, + consensus_state.finalized_height, + current_epoch + ); + + // Determine the target block for the update + // If we're still in the same epoch, just get the latest block + // If we've crossed epoch boundaries, we need to sync epoch by epoch + let target_block = if latest_epoch > current_epoch { + // We've crossed epoch boundaries — sync the first epoch boundary block. + // Since latest_block is in a later epoch, this boundary is always <= latest_block. + let next_epoch_boundary = (current_epoch + 1) * client.prover.epoch_length - 1; + log::trace!( + target: "tesseract-pharos", + "Syncing epoch boundary block {} for epoch transition {} -> {}", + next_epoch_boundary, + current_epoch, + current_epoch + 1 + ); + next_epoch_boundary + } else { + latest_block + }; + + let update = client.prover.fetch_block_update(target_block).await?; + + log::trace!( + target: "tesseract-pharos", + "Fetched update for block {}{}", + target_block, + if update.validator_set_proof.is_some() { " (with validator set proof)" } else { "" } + ); + + Ok(Some(update)) +} diff --git a/tesseract/consensus/sync-committee/src/host.rs b/tesseract/consensus/sync-committee/src/host.rs index 3b2dda0f8..9106ddc4e 100644 --- a/tesseract/consensus/sync-committee/src/host.rs +++ b/tesseract/consensus/sync-committee/src/host.rs @@ -28,7 +28,7 @@ use std::{collections::BTreeMap, sync::Arc}; use sync_committee_primitives::{constants::Config, util::compute_sync_committee_period}; use crate::notification::consensus_notification; -use op_verifier::{CANNON, _PERMISSIONED}; +use op_verifier::{_PERMISSIONED, CANNON}; use tesseract_primitives::{IsmpHost, IsmpProvider}; #[async_trait::async_trait] diff --git a/tesseract/messaging/config/Cargo.toml b/tesseract/messaging/config/Cargo.toml index d7224242b..b2386f5d5 100644 --- a/tesseract/messaging/config/Cargo.toml +++ b/tesseract/messaging/config/Cargo.toml @@ -10,6 +10,7 @@ 
tesseract-primitives = { workspace = true } tesseract-evm = { workspace = true } tesseract-evm-tendermint = { workspace = true } tesseract-substrate-evm = { workspace = true } +tesseract-pharos-evm = { workspace = true } tesseract-tron = { workspace = true } tendermint-primitives = { workspace = true, default-features = false } diff --git a/tesseract/messaging/config/src/lib.rs b/tesseract/messaging/config/src/lib.rs index f0317592e..e3db02acd 100644 --- a/tesseract/messaging/config/src/lib.rs +++ b/tesseract/messaging/config/src/lib.rs @@ -18,6 +18,7 @@ use substrate_state_machine::HashAlgorithm; use tendermint_primitives::keys::{DefaultEvmKeys, SeiEvmKeys}; use tesseract_evm::{EvmClient, EvmConfig}; use tesseract_evm_tendermint::{TendermintEvmClient, TendermintEvmClientConfig}; +use tesseract_pharos_evm::PharosEvmClient; use tesseract_primitives::IsmpProvider; use tesseract_substrate::{ config::{Blake2SubstrateChain, KeccakSubstrateChain}, @@ -38,6 +39,8 @@ pub enum AnyConfig { Tendermint(TendermintEvmClientConfig), /// Configuration for substrate-evm(revive) based chains SubstrateEvm(SubstrateEvmClientConfig), + /// Configuration for Pharos EVM chains + PharosEvm(EvmConfig), /// Configuration for Tron chains Tron(TronConfig), } @@ -49,6 +52,7 @@ impl AnyConfig { Self::Evm(config) => config.state_machine, Self::Tendermint(tendermint_config) => tendermint_config.evm_config.state_machine, Self::SubstrateEvm(substrate_evm_config) => substrate_evm_config.evm.state_machine, + Self::PharosEvm(config) => config.state_machine, Self::Tron(config) => config.state_machine(), } } @@ -110,6 +114,11 @@ impl AnyConfig { client.set_latest_finalized_height(hyperbridge).await?; Arc::new(client) as Arc }, + AnyConfig::PharosEvm(config) => { + let mut client = PharosEvmClient::new(config).await?; + client.set_latest_finalized_height(hyperbridge).await?; + Arc::new(client) as Arc + }, AnyConfig::Tron(config) => { let mut client = TronClient::new(config).await?; 
client.set_latest_finalized_height(hyperbridge).await?; diff --git a/tesseract/messaging/evm/src/tx.rs b/tesseract/messaging/evm/src/tx.rs index 49301d809..e5cc4fe90 100644 --- a/tesseract/messaging/evm/src/tx.rs +++ b/tesseract/messaging/evm/src/tx.rs @@ -24,8 +24,8 @@ use ismp::{ use ismp_solidity_abi::{ evm_host::{PostRequestHandled, PostResponseHandled}, handler::{ - HandlerInstance, PostRequestLeaf, PostRequestMessage, - PostResponseLeaf, PostResponseMessage, Proof, StateMachineHeight, + HandlerInstance, PostRequestLeaf, PostRequestMessage, PostResponseLeaf, + PostResponseMessage, Proof, StateMachineHeight, }, }; use mmr_primitives::mmr_position_to_k_index; @@ -135,20 +135,17 @@ fn build_tx_request( pub fn get_chain_gas_limit(state_machine: StateMachine) -> u64 { match state_machine { - StateMachine::Evm(ARBITRUM_CHAIN_ID) | StateMachine::Evm(ARBITRUM_SEPOLIA_CHAIN_ID) => { - 32_000_000 - }, + StateMachine::Evm(ARBITRUM_CHAIN_ID) | StateMachine::Evm(ARBITRUM_SEPOLIA_CHAIN_ID) => + 32_000_000, StateMachine::Evm(GNOSIS_CHAIN_ID) | StateMachine::Evm(CHIADO_CHAIN_ID) => 16_000_000, // Gas limit is 10_000_000, we set our transaction gas limit to 40% of that StateMachine::Evm(SEI_CHAIN_ID) | StateMachine::Evm(SEI_TESTNET_CHAIN_ID) => 4_000_000, // Gas limit is 60_000_000, we set our transaction gas limit to 30% of that - StateMachine::Evm(CRONOS_CHAIN_ID) | StateMachine::Evm(CRONOS_TESTNET_CHAIN_ID) => { - 18_000_000 - }, + StateMachine::Evm(CRONOS_CHAIN_ID) | StateMachine::Evm(CRONOS_TESTNET_CHAIN_ID) => + 18_000_000, // Gas limit is 50_000_000, we set our transaction gas limit to 30% of that - StateMachine::Evm(INJECTIVE_CHAIN_ID) | StateMachine::Evm(INJECTIVE_TESTNET_CHAIN_ID) => { - 15_000_000 - }, + StateMachine::Evm(INJECTIVE_CHAIN_ID) | StateMachine::Evm(INJECTIVE_TESTNET_CHAIN_ID) => + 15_000_000, // Ethereum L1 max's gas limit per transaction will be reduced to 16m soon. 
StateMachine::Evm(_) => 16_000_000, _ => Default::default(), @@ -294,9 +291,7 @@ pub async fn generate_contract_calls( datagram: RequestResponse::Request(..), .. }) => return Err(anyhow!("Get requests are not supported by relayer")), - Message::Timeout(_) => { - return Err(anyhow!("Timeout messages not supported by relayer")) - }, + Message::Timeout(_) => return Err(anyhow!("Timeout messages not supported by relayer")), Message::FraudProof(_) => return Err(anyhow!("Unexpected fraud proof message")), }; @@ -410,7 +405,7 @@ pub async fn wait_for_success( tx_hash: H256, ) -> anyhow::Result>> { match wait_for_transaction_receipt(tx_hash, client).await? { - Some(receipt) => { + Some(receipt) => if receipt.inner.status_or_post_state() == Eip658Value::Eip658(true) { tracing::info!("Tx for {:?} succeeded", client.state_machine); Ok(Some(extract_event_commitments(&receipt))) @@ -421,8 +416,7 @@ pub async fn wait_for_success( client.state_machine ); Err(anyhow!("Transaction reverted")) - } - }, + }, None => Ok(None), } } @@ -437,7 +431,7 @@ pub async fn handle_message_submission( for msg in messages { match msg { - Message::Request(req_msg) => { + Message::Request(req_msg) => for post in req_msg.requests { let req = Request::Post(post); let commitment = hash_request::(&req); @@ -452,12 +446,11 @@ pub async fn handle_message_submission( height, }); } - } - }, + }, Message::Response(ResponseMessage { datagram: RequestResponse::Response(resp), .. 
- }) => { + }) => for res in resp { let commitment = hash_response::(&res); let request_commitment = hash_request::(&res.request()); @@ -473,8 +466,7 @@ pub async fn handle_message_submission( height, }); } - } - }, + }, _ => {}, } } diff --git a/tesseract/messaging/fees/src/db.rs b/tesseract/messaging/fees/src/db.rs index c31485de4..32d6556c5 100644 --- a/tesseract/messaging/fees/src/db.rs +++ b/tesseract/messaging/fees/src/db.rs @@ -19,8 +19,8 @@ pub mod deliveries { pub const NAME: &str = "Deliveries"; pub mod id { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "id"; pub struct Set(pub i32); @@ -89,8 +89,8 @@ pub mod deliveries { } pub mod hash { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "hash"; pub struct Set(pub String); @@ -150,8 +150,8 @@ pub mod deliveries { } pub mod source_chain { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "source_chain"; pub struct Set(pub String); @@ -215,8 +215,8 @@ pub mod deliveries { } pub mod dest_chain { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "dest_chain"; pub struct Set(pub String); @@ -280,8 +280,8 @@ pub mod deliveries { } pub mod delivery_type { use super::{ - super::*, 
OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "delivery_type"; pub struct Set(pub i32); @@ -354,8 +354,8 @@ pub mod deliveries { } pub mod created_at { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "created_at"; pub struct Set(pub i32); @@ -428,8 +428,8 @@ pub mod deliveries { } pub mod height { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "height"; pub struct Set(pub i32); @@ -1073,8 +1073,8 @@ pub mod pending_withdrawal { pub const NAME: &str = "PendingWithdrawal"; pub mod id { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "id"; pub struct Set(pub i32); @@ -1143,8 +1143,8 @@ pub mod pending_withdrawal { } pub mod dest { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "dest"; pub struct Set(pub String); @@ -1204,8 +1204,8 @@ pub mod pending_withdrawal { } pub mod encoded { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, 
UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "encoded"; pub struct Set(pub Vec); @@ -1614,8 +1614,8 @@ pub mod unprofitable_messages { pub const NAME: &str = "UnprofitableMessages"; pub mod id { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "id"; pub struct Set(pub i32); @@ -1684,8 +1684,8 @@ pub mod unprofitable_messages { } pub mod dest { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "dest"; pub struct Set(pub String); @@ -1745,8 +1745,8 @@ pub mod unprofitable_messages { } pub mod encoded { use super::{ - super::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, WhereParam, - WithParam, _prisma::*, + super::*, _prisma::*, OrderByParam, SetParam, UncheckedSetParam, UniqueWhereParam, + WhereParam, WithParam, }; pub const NAME: &str = "encoded"; pub struct Set(pub Vec); diff --git a/tesseract/messaging/pharos-evm/Cargo.toml b/tesseract/messaging/pharos-evm/Cargo.toml new file mode 100644 index 000000000..6c9f0a04b --- /dev/null +++ b/tesseract/messaging/pharos-evm/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "tesseract-pharos-evm" +version = "0.1.0" +edition = "2024" +description = "Pharos EVM client for ISMP messaging relay" +authors = ["Polytope Labs "] +publish = false + +[dependencies] +log = { workspace = true } +anyhow = { workspace = true } +async-trait = "0.1.71" +primitive-types = { workspace = true, default-features = true } +sp-core = { workspace = true, features = ["full_crypto"] } +codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } + +ismp = { 
workspace = true, default-features = true } +tesseract-primitives = { workspace = true, default-features = true } +tesseract-evm = { workspace = true, default-features = true } +pharos-prover = { workspace = true, default-features = true } +pharos-primitives = { workspace = true, default-features = true } +pharos-state-machine = { workspace = true, default-features = true } +pallet-ismp-host-executive = { workspace = true, default-features = true } +serde = { workspace = true, features = ["derive", "alloc"] } diff --git a/tesseract/messaging/pharos-evm/src/lib.rs b/tesseract/messaging/pharos-evm/src/lib.rs new file mode 100644 index 000000000..58919d9cd --- /dev/null +++ b/tesseract/messaging/pharos-evm/src/lib.rs @@ -0,0 +1,429 @@ +// Copyright (C) Polytope Labs Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::{collections::BTreeMap, sync::Arc, time::Duration}; + +use anyhow::Error; +use codec::Encode; +use ismp::{ + consensus::{ConsensusStateId, StateCommitment, StateMachineHeight, StateMachineId}, + events::{Event, StateCommitmentVetoed}, + host::StateMachine, + messaging::{CreateConsensusState, Message}, +}; +use pallet_ismp_host_executive::HostParam; +use pharos_primitives::{NonExistenceProof, PharosProofNode}; +use pharos_prover::{ + rpc::{PharosRpcClient, RpcAccountProof, hex_to_bytes}, + rpc_to_proof_nodes, rpc_to_sibling_proofs, +}; +use pharos_state_machine::AccountProofData; +use primitive_types::{H160, H256, U256}; +use tesseract_evm::{EvmClient, EvmConfig}; +use tesseract_primitives::{ + BoxStream, ByzantineHandler, EstimateGasReturnParams, IsmpProvider, Query, Signature, + StateMachineUpdated, StateProofQueryType, TxResult, +}; + +use pharos_state_machine::PharosStateProof; + +#[derive(Clone)] +pub struct PharosEvmClient { + pub evm: EvmClient, + pub rpc: Arc, +} + +impl PharosEvmClient { + pub async fn new(config: EvmConfig) -> Result { + let rpc_url = config + .rpc_urls + .first() + .ok_or_else(|| anyhow::anyhow!("No RPC URL configured"))?; + let rpc = Arc::new( + PharosRpcClient::new(rpc_url).map_err(|e| anyhow::anyhow!("RPC init failed: {e:?}"))?, + ); + let evm = EvmClient::new(config).await?; + Ok(Self { evm, rpc }) + } + + /// Fetch a Pharos state proof for the given storage keys at the given block. + /// Handles both existence and non-existence proofs from the RPC response. 
+ async fn fetch_pharos_proof( + &self, + at: u64, + address: H160, + slot_hashes: Vec, + ) -> Result, Error> { + let rpc_proof = self + .rpc + .get_proof(address, slot_hashes, at) + .await + .map_err(|e| anyhow::anyhow!("eth_getProof failed: {e:?}"))?; + + let mut storage_proof = BTreeMap::new(); + let mut storage_values = BTreeMap::new(); + let mut non_existence_proofs = BTreeMap::new(); + + for sp in &rpc_proof.storage_proof { + let key_bytes = + hex_to_bytes(&sp.key).map_err(|e| anyhow::anyhow!("hex decode key: {e:?}"))?; + let mut slot_key = [0u8; 32]; + if key_bytes.len() <= 32 { + slot_key[32 - key_bytes.len()..].copy_from_slice(&key_bytes); + } + let slot_vec = slot_key.to_vec(); + + if sp.is_exist { + let proof_nodes = rpc_to_proof_nodes(&sp.proof) + .map_err(|e| anyhow::anyhow!("proof node conversion: {e:?}"))?; + let value_bytes = hex_to_bytes(&sp.value) + .map_err(|e| anyhow::anyhow!("hex decode value: {e:?}"))?; + let mut padded = [0u8; 32]; + if value_bytes.len() <= 32 { + padded[32 - value_bytes.len()..].copy_from_slice(&value_bytes); + } + storage_proof.insert(slot_vec.clone(), proof_nodes); + storage_values.insert(slot_vec, padded.to_vec()); + } else { + let proof_nodes = rpc_to_proof_nodes(&sp.proof) + .map_err(|e| anyhow::anyhow!("proof node conversion: {e:?}"))?; + let sibling_proofs = rpc_to_sibling_proofs(&sp.sibling_leftmost_leaf_proofs) + .map_err(|e| anyhow::anyhow!("sibling proof conversion: {e:?}"))?; + non_existence_proofs + .insert(slot_vec, NonExistenceProof { proof_nodes, sibling_proofs }); + } + } + + let pharos_proof = PharosStateProof { + storage_proof, + storage_values, + non_existence_proofs, + account_proofs: BTreeMap::new(), + }; + Ok(pharos_proof.encode()) + } + + async fn fetch_account_proof(&self, at: u64, address: H160) -> Result { + let rpc_proof = self + .rpc + .get_proof(address, vec![], at) + .await + .map_err(|e| anyhow::anyhow!("eth_getProof failed: {e:?}"))?; + + let proof_nodes = 
rpc_to_proof_nodes(&rpc_proof.account_proof) + .map_err(|e| anyhow::anyhow!("account proof conversion: {e:?}"))?; + let raw_value = + hex_to_bytes(&rpc_proof.raw_value).map_err(|e| anyhow::anyhow!("hex decode: {e:?}"))?; + + Ok(AccountProofData { proof_nodes, raw_value }) + } +} + +#[async_trait::async_trait] +impl IsmpProvider for PharosEvmClient { + async fn query_consensus_state( + &self, + at: Option, + id: ConsensusStateId, + ) -> Result, Error> { + self.evm.query_consensus_state(at, id).await + } + + async fn query_latest_height(&self, id: StateMachineId) -> Result { + self.evm.query_latest_height(id).await + } + + async fn query_finalized_height(&self) -> Result { + self.evm.query_finalized_height().await + } + + async fn query_state_machine_commitment( + &self, + height: StateMachineHeight, + ) -> Result { + self.evm.query_state_machine_commitment(height).await + } + + async fn query_state_machine_update_time( + &self, + height: StateMachineHeight, + ) -> Result { + self.evm.query_state_machine_update_time(height).await + } + + async fn query_challenge_period(&self, id: StateMachineId) -> Result { + self.evm.query_challenge_period(id).await + } + + async fn query_timestamp(&self) -> Result { + self.evm.query_timestamp().await + } + + async fn query_requests_proof( + &self, + at: u64, + keys: Vec, + _counterparty: StateMachine, + ) -> Result, Error> { + let slot_hashes: Vec = keys + .into_iter() + .map(|q| self.evm.request_commitment_key(q.commitment).1) + .collect(); + self.fetch_pharos_proof(at, self.evm.config.ismp_host, slot_hashes).await + } + + async fn query_responses_proof( + &self, + at: u64, + keys: Vec, + _counterparty: StateMachine, + ) -> Result, Error> { + let slot_hashes: Vec = keys + .into_iter() + .map(|q| self.evm.response_commitment_key(q.commitment).1) + .collect(); + self.fetch_pharos_proof(at, self.evm.config.ismp_host, slot_hashes).await + } + + async fn query_state_proof( + &self, + at: u64, + keys: StateProofQueryType, + ) -> Result, 
Error> { + match keys { + StateProofQueryType::Ismp(keys) => { + let slot_hashes: Vec = + keys.into_iter().map(|k| H256::from_slice(&k)).collect(); + self.fetch_pharos_proof(at, self.evm.config.ismp_host, slot_hashes).await + }, + StateProofQueryType::Arbitrary(keys) => { + // For arbitrary keys, group by contract address and fetch per-contract proofs + // Then merge into a single PharosStateProof + let mut storage_proof = BTreeMap::new(); + let mut storage_values = BTreeMap::new(); + let mut non_existence_proofs = BTreeMap::new(); + let mut account_proofs = BTreeMap::new(); + + let mut groups: BTreeMap> = BTreeMap::new(); + let mut account_queries: Vec = Vec::new(); + for key in &keys { + if key.len() == 52 { + let address = H160::from_slice(&key[..20]); + let slot = H256::from_slice(&key[20..]); + groups.entry(address).or_default().push(slot); + } else if key.len() == 20 { + account_queries.push(H160::from_slice(key)); + } + } + + for address in account_queries { + let data = self.fetch_account_proof(at, address).await?; + account_proofs.insert(address.0.to_vec(), data); + } + + for (address, slots) in groups { + let rpc_proof = self + .rpc + .get_proof(address, slots, at) + .await + .map_err(|e| anyhow::anyhow!("eth_getProof failed: {e:?}"))?; + + for sp in &rpc_proof.storage_proof { + let key_bytes = hex_to_bytes(&sp.key) + .map_err(|e| anyhow::anyhow!("hex decode: {e:?}"))?; + let mut slot_key = [0u8; 32]; + if key_bytes.len() <= 32 { + slot_key[32 - key_bytes.len()..].copy_from_slice(&key_bytes); + } + let slot_vec = slot_key.to_vec(); + + if sp.is_exist { + let nodes = rpc_to_proof_nodes(&sp.proof) + .map_err(|e| anyhow::anyhow!("{e:?}"))?; + let val = + hex_to_bytes(&sp.value).map_err(|e| anyhow::anyhow!("{e:?}"))?; + let mut padded = [0u8; 32]; + if val.len() <= 32 { + padded[32 - val.len()..].copy_from_slice(&val); + } + storage_proof.insert(slot_vec.clone(), nodes); + storage_values.insert(slot_vec, padded.to_vec()); + } else { + let nodes = 
rpc_to_proof_nodes(&sp.proof) + .map_err(|e| anyhow::anyhow!("{e:?}"))?; + let siblings = rpc_to_sibling_proofs(&sp.sibling_leftmost_leaf_proofs) + .map_err(|e| anyhow::anyhow!("{e:?}"))?; + non_existence_proofs.insert( + slot_vec, + NonExistenceProof { proof_nodes: nodes, sibling_proofs: siblings }, + ); + } + } + } + + let pharos_proof = PharosStateProof { + storage_proof, + storage_values, + non_existence_proofs, + account_proofs, + }; + Ok(pharos_proof.encode()) + }, + } + } + + async fn query_ismp_events( + &self, + previous_height: u64, + event: StateMachineUpdated, + ) -> Result, Error> { + self.evm.query_ismp_events(previous_height, event).await + } + + fn name(&self) -> String { + self.evm.name() + } + + fn state_machine_id(&self) -> StateMachineId { + self.evm.state_machine_id() + } + + fn block_max_gas(&self) -> u64 { + self.evm.block_max_gas() + } + + fn initial_height(&self) -> u64 { + self.evm.initial_height() + } + + async fn estimate_gas(&self, msg: Vec) -> Result, Error> { + self.evm.estimate_gas(msg).await + } + + async fn query_request_fee_metadata(&self, hash: H256) -> Result { + self.evm.query_request_fee_metadata(hash).await + } + + async fn query_request_receipt(&self, hash: H256) -> Result, Error> { + self.evm.query_request_receipt(hash).await + } + + async fn query_response_receipt(&self, hash: H256) -> Result, Error> { + self.evm.query_response_receipt(hash).await + } + + async fn query_response_fee_metadata(&self, hash: H256) -> Result { + self.evm.query_response_fee_metadata(hash).await + } + + async fn state_machine_update_notification( + &self, + counterparty_state_id: StateMachineId, + ) -> Result, Error> { + self.evm.state_machine_update_notification(counterparty_state_id).await + } + + async fn state_commitment_vetoed_notification( + &self, + from: u64, + height: StateMachineHeight, + ) -> BoxStream { + self.evm.state_commitment_vetoed_notification(from, height).await + } + + async fn submit( + &self, + messages: Vec, + coprocessor: 
StateMachine, + ) -> Result { + self.evm.submit(messages, coprocessor).await + } + + fn request_commitment_full_key(&self, commitment: H256) -> Vec> { + self.evm.request_commitment_full_key(commitment) + } + + fn request_receipt_full_key(&self, commitment: H256) -> Vec> { + self.evm.request_receipt_full_key(commitment) + } + + fn response_commitment_full_key(&self, commitment: H256) -> Vec> { + self.evm.response_commitment_full_key(commitment) + } + + fn response_receipt_full_key(&self, commitment: H256) -> Vec> { + self.evm.response_receipt_full_key(commitment) + } + + fn address(&self) -> Vec { + self.evm.address() + } + + fn sign(&self, msg: &[u8]) -> Signature { + self.evm.sign(msg) + } + + async fn set_latest_finalized_height( + &mut self, + counterparty: Arc, + ) -> Result<(), Error> { + self.evm.set_latest_finalized_height(counterparty).await + } + + async fn set_initial_consensus_state( + &self, + message: CreateConsensusState, + ) -> Result<(), Error> { + self.evm.set_initial_consensus_state(message).await + } + + async fn veto_state_commitment(&self, height: StateMachineHeight) -> Result<(), Error> { + self.evm.veto_state_commitment(height).await + } + + async fn query_host_params( + &self, + state_machine: StateMachine, + ) -> Result, Error> { + self.evm.query_host_params(state_machine).await + } + + fn max_concurrent_queries(&self) -> usize { + self.evm.max_concurrent_queries() + } + + async fn fee_token_decimals(&self) -> Result { + self.evm.fee_token_decimals().await + } +} + +#[async_trait::async_trait] +impl ByzantineHandler for PharosEvmClient { + async fn check_for_byzantine_attack( + &self, + coprocessor: StateMachine, + counterparty: Arc, + event: StateMachineUpdated, + ) -> Result<(), Error> { + self.evm.check_for_byzantine_attack(coprocessor, counterparty, event).await + } + + async fn state_machine_updates( + &self, + counterparty_state_id: StateMachineId, + ) -> Result>, Error> { + self.evm.state_machine_updates(counterparty_state_id).await 
+ } +}