diff --git a/Cargo.lock b/Cargo.lock index e375d903a3..8c974483f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7295,9 +7295,12 @@ dependencies = [ "clap 4.5.50", "clap_complete", "contracts-artifacts", + "dirs", "env_logger 0.10.2", "ethers", "ethers-contract", + "fendermint_actor_blobs_shared", + "fendermint_actor_bucket", "fendermint_app", "fendermint_app_settings", "fendermint_crypto", @@ -7306,6 +7309,7 @@ dependencies = [ "fendermint_eth_hardhat", "fendermint_rpc", "fendermint_vm_actor_interface", + "fendermint_vm_message", "fil_actors_runtime", "flate2", "fs-err", @@ -7354,6 +7358,7 @@ dependencies = [ "url", "urlencoding", "uuid 1.18.1", + "walkdir", "warp", "zeroize", ] @@ -11810,6 +11815,7 @@ dependencies = [ "js-sys", "log", "mime", + "mime_guess", "native-tls", "once_cell", "percent-encoding", diff --git a/docs/storage-cli-quickstart.md b/docs/storage-cli-quickstart.md new file mode 100644 index 0000000000..95ad9c8f82 --- /dev/null +++ b/docs/storage-cli-quickstart.md @@ -0,0 +1,118 @@ +# Storage CLI Quickstart + +This guide walks you through testing the IPC decentralized storage CLI on the test subnet. + +## Prerequisites + +Build the CLI (macOS, targeting the local machine): + +```bash +cargo build --release -p ipc-cli --features ipc-storage +``` + +Make sure you have an IPC wallet set up (`~/.ipc/config.toml` with an EVM key). 
+If not, create one: + +```bash +./target/release/ipc-cli wallet new --wallet-type evm +./target/release/ipc-cli wallet set-default --wallet-type evm --address <0xYOUR_ADDRESS> +``` + +## Step 1: Fund your account on the storage subnet + +Send tokens from the parent chain (calibnet) into the storage subnet: + +```bash +./target/release/ipc-cli cross-msg fund \ + --subnet "/r314159/t410fg32br4ow4kdhp3wssi6c4xumsdpjzhw6y4ydbxq" \ + --from 0xYOUR_ADDRESS \ + --to 0xYOUR_ADDRESS \ + 60 +``` + +Wait for the top-down message to be finalized (up to ~3 minutes), then verify your balance: + +```bash +curl http://136.115.12.207:8545 \ + -H 'content-type: application/json' \ + --data '{"jsonrpc":"2.0","method":"eth_getBalance","params":["0xYOUR_ADDRESS","latest"],"id":1}' +``` + +A non-zero `result` means your account is funded. + +## Step 2: Initialize the storage client + +```bash +./target/release/ipc-cli storage client init \ + --rpc-url http://136.115.12.207:26657 \ + --gateway-url http://136.115.12.207:8080 +``` + +This creates `~/.ipc/storage/client/config.yaml`. The CLI uses your default EVM wallet key for signing transactions. + +## Step 3: Run the test suite + +```bash +./test.sh +``` + +The script automatically: +1. Buys storage credit (0.1 FIL) +2. Creates a bucket (or reuses an existing one) +3. Tests all 18 operations: upload, list, stat, cat, download, recursive upload/download, move, delete + +Phase 1 (steps 2-12) tests read/write operations immediately. +Phase 2 (steps 13-18) waits 90 seconds for blob finalization, then tests move and delete. 
+ +## Manual commands + +Once initialized, you can use any storage command directly: + +```bash +# Buy storage credit +./target/release/ipc-cli storage client credit buy 0.1 + +# Create a bucket +./target/release/ipc-cli storage client bucket create + +# List buckets +./target/release/ipc-cli storage client bucket list + +# Upload a file +./target/release/ipc-cli storage client cp /path/to/file.txt ipc://BUCKET/key.txt --gateway http://136.115.12.207:8080 + +# Upload a directory +./target/release/ipc-cli storage client cp -r /path/to/dir ipc://BUCKET/prefix --gateway http://136.115.12.207:8080 + +# List objects +./target/release/ipc-cli storage client ls ipc://BUCKET/ + +# Get object metadata +./target/release/ipc-cli storage client stat ipc://BUCKET/key.txt + +# Read file contents +./target/release/ipc-cli storage client cat ipc://BUCKET/key.txt --gateway http://136.115.12.207:8080 + +# Download a file +./target/release/ipc-cli storage client cp ipc://BUCKET/key.txt /local/path.txt --gateway http://136.115.12.207:8080 + +# Move/rename +./target/release/ipc-cli storage client mv ipc://BUCKET/old.txt ipc://BUCKET/new.txt --gateway http://136.115.12.207:8080 + +# Delete +./target/release/ipc-cli storage client rm --force ipc://BUCKET/key.txt + +# Delete recursively +./target/release/ipc-cli storage client rm -r --force ipc://BUCKET/prefix/ + +# Check credit info +./target/release/ipc-cli storage client credit info +``` + +Replace `BUCKET` with your bucket address (e.g. `t0123`). + +## Notes + +- **Blob finalization**: After uploading, blobs take ~10-15 seconds to be finalized by the storage node. Until finalized, delete and move operations will fail with "blob pending finalization". +- **Gateway URL**: The `--gateway` flag is required for commands that transfer data (cp, cat, mv). Read-only commands (ls, stat, credit info, bucket list) only need the RPC. +- **Overwrite**: Use `--overwrite` with `cp` to replace an existing object. 
diff --git a/fendermint/actors/blobs/shared/src/execution.rs b/fendermint/actors/blobs/shared/src/execution.rs new file mode 100644 index 0000000000..e57bb9c07e --- /dev/null +++ b/fendermint/actors/blobs/shared/src/execution.rs @@ -0,0 +1,89 @@ +// Copyright 2026 Recall Contributors +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use serde::{Deserialize, Serialize}; + +use crate::bytes::B256; + +// FEVM InvokeContract selectors used by blobs actor facade for execution methods. +pub const CREATE_JOB_SELECTOR: [u8; 4] = [0x6b, 0xa4, 0x8d, 0x87]; +pub const CLAIM_JOB_SELECTOR: [u8; 4] = [0x9c, 0x7d, 0xd2, 0x19]; +pub const COMPLETE_JOB_SELECTOR: [u8; 4] = [0x59, 0x2f, 0x72, 0xc4]; +pub const FAIL_JOB_SELECTOR: [u8; 4] = [0xf5, 0xe2, 0x2c, 0x70]; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub enum JobStatus { + Pending, + Claimed, + Running, + Succeeded, + Failed, + TimedOut, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ExecutionJob { + pub id: u64, + pub creator: Address, + pub claimed_by: Option
, + pub status: JobStatus, + pub binary_ref: String, + pub input_refs: Vec, + pub args: Vec, + pub env: Vec<(String, String)>, + pub timeout_secs: u64, + pub created_epoch: ChainEpoch, + pub started_epoch: Option, + pub completed_epoch: Option, + pub output_refs: Vec, + pub output_commitment: Option, + pub exit_code: Option, + pub error: Option, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct CreateJobParams { + pub binary_ref: String, + pub input_refs: Vec, + pub args: Vec, + pub env: Vec<(String, String)>, + pub timeout_secs: u64, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ClaimJobParams { + pub id: u64, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct CompleteJobParams { + pub id: u64, + pub output_refs: Vec, + pub output_commitment: B256, + pub exit_code: i32, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct FailJobParams { + pub id: u64, + pub reason: String, + pub exit_code: i32, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct GetJobParams { + pub id: u64, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ListJobsParams { + pub status: Option, + pub limit: u32, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ListJobsReturn { + pub jobs: Vec, +} diff --git a/fendermint/actors/blobs/shared/src/lib.rs b/fendermint/actors/blobs/shared/src/lib.rs index b5d78a0992..380cd4a9d0 100644 --- a/fendermint/actors/blobs/shared/src/lib.rs +++ b/fendermint/actors/blobs/shared/src/lib.rs @@ -13,6 +13,7 @@ pub mod accounts; pub mod blobs; pub mod bytes; pub mod credit; +pub mod execution; pub mod method; pub mod operators; pub mod sdk; diff --git a/fendermint/actors/blobs/shared/src/method.rs b/fendermint/actors/blobs/shared/src/method.rs index 3718f09132..cb39f62262 100644 --- a/fendermint/actors/blobs/shared/src/method.rs +++ 
b/fendermint/actors/blobs/shared/src/method.rs @@ -46,4 +46,12 @@ pub enum Method { RegisterNodeOperator = frc42_dispatch::method_hash!("RegisterNodeOperator"), GetOperatorInfo = frc42_dispatch::method_hash!("GetOperatorInfo"), GetActiveOperators = frc42_dispatch::method_hash!("GetActiveOperators"), + + // Execution methods (MVP in blobs actor) + CreateJob = frc42_dispatch::method_hash!("CreateJob"), + ClaimJob = frc42_dispatch::method_hash!("ClaimJob"), + CompleteJob = frc42_dispatch::method_hash!("CompleteJob"), + FailJob = frc42_dispatch::method_hash!("FailJob"), + GetJob = frc42_dispatch::method_hash!("GetJob"), + ListJobs = frc42_dispatch::method_hash!("ListJobs"), } diff --git a/fendermint/actors/blobs/src/actor.rs b/fendermint/actors/blobs/src/actor.rs index f9b928af8a..e773f94420 100644 --- a/fendermint/actors/blobs/src/actor.rs +++ b/fendermint/actors/blobs/src/actor.rs @@ -18,6 +18,7 @@ use crate::{ }; mod admin; +mod execution; mod metrics; mod system; mod user; @@ -52,7 +53,63 @@ impl BlobsActor { params: InvokeContractParams, ) -> Result { let input_data: InputData = params.try_into()?; - if sol_blobs::can_handle(&input_data) { + if sol_blobs::is_register_node_operator_call(&input_data) { + let params = sol_blobs::parse_register_node_operator_input(&input_data)?; + let params = fendermint_actor_blobs_shared::operators::RegisterNodeOperatorParams { + bls_pubkey: params.bls_pubkey, + rpc_url: params.rpc_url, + }; + let _ = Self::register_node_operator(rt, params)?; + Ok(InvokeContractReturn { + output_data: Vec::new(), + }) + } else if sol_blobs::is_get_operator_info_call(&input_data) { + let params = sol_blobs::parse_get_operator_info_input(&input_data)?; + let address = rt + .resolve_address(¶ms.address) + .map(fvm_shared::address::Address::new_id) + .unwrap_or(params.address); + let info = Self::get_operator_info( + rt, + fendermint_actor_blobs_shared::operators::GetOperatorInfoParams { address }, + )?; + let output_data = 
sol_blobs::encode_get_operator_info_output(info)?; + Ok(InvokeContractReturn { output_data }) + } else if sol_blobs::is_get_active_operators_call(&input_data) { + let operators = Self::get_active_operators(rt)?; + let output_data = sol_blobs::encode_get_active_operators_output(operators.operators)?; + Ok(InvokeContractReturn { output_data }) + } else if sol_blobs::is_create_job_call(&input_data) { + let params = sol_blobs::parse_create_job_input(&input_data)?; + let _ = Self::create_job(rt, params.into())?; + Ok(InvokeContractReturn { + output_data: Vec::new(), + }) + } else if sol_blobs::is_claim_job_call(&input_data) { + let params = sol_blobs::parse_claim_job_input(&input_data)?; + let _ = Self::claim_job(rt, params.into())?; + Ok(InvokeContractReturn { + output_data: Vec::new(), + }) + } else if sol_blobs::is_complete_job_call(&input_data) { + let params = sol_blobs::parse_complete_job_input(&input_data)?; + let _ = Self::complete_job(rt, params.into())?; + Ok(InvokeContractReturn { + output_data: Vec::new(), + }) + } else if sol_blobs::is_fail_job_call(&input_data) { + let params = sol_blobs::parse_fail_job_input(&input_data)?; + let _ = Self::fail_job(rt, params.into())?; + Ok(InvokeContractReturn { + output_data: Vec::new(), + }) + } else if sol_blobs::is_finalize_blob_call(&input_data) { + let params = sol_blobs::parse_finalize_blob_input(&input_data, rt)?; + Self::finalize_blob(rt, params)?; + Ok(InvokeContractReturn { + output_data: Vec::new(), + }) + } else if sol_blobs::can_handle(&input_data) { let output_data = match sol_blobs::parse_input(&input_data)? 
{ sol_blobs::Calls::addBlob(call) => { let params = call.params(rt)?; @@ -213,6 +270,14 @@ impl ActorCode for BlobsActor { GetOperatorInfo => get_operator_info, GetActiveOperators => get_active_operators, + // Execution methods (MVP) + CreateJob => create_job, + ClaimJob => claim_job, + CompleteJob => complete_job, + FailJob => fail_job, + GetJob => get_job, + ListJobs => list_jobs, + _ => fallback, } } diff --git a/fendermint/actors/blobs/src/actor/execution.rs b/fendermint/actors/blobs/src/actor/execution.rs new file mode 100644 index 0000000000..134a3eb18c --- /dev/null +++ b/fendermint/actors/blobs/src/actor/execution.rs @@ -0,0 +1,72 @@ +// Copyright 2026 Recall Contributors +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::execution::{ + ClaimJobParams, CompleteJobParams, CreateJobParams, ExecutionJob, FailJobParams, GetJobParams, + ListJobsParams, ListJobsReturn, +}; +use fil_actors_runtime::{runtime::Runtime, ActorError}; + +use crate::{actor::BlobsActor, State}; + +impl BlobsActor { + pub fn create_job( + rt: &impl Runtime, + params: CreateJobParams, + ) -> Result { + rt.validate_immediate_caller_accept_any()?; + let creator = rt.message().caller(); + rt.transaction(|st: &mut State, rt| st.create_job(creator, params.clone(), rt.curr_epoch())) + } + + pub fn claim_job( + rt: &impl Runtime, + params: ClaimJobParams, + ) -> Result { + rt.validate_immediate_caller_accept_any()?; + let worker = rt.message().caller(); + + // For MVP, execution permission is tied to active storage operators. 
+ let is_active_operator = { + let state = rt.state::()?; + state.operators.get_index(&worker).is_some() + }; + if !is_active_operator { + return Err(ActorError::forbidden( + "caller is not an active storage operator".into(), + )); + } + + rt.transaction(|st: &mut State, rt| st.claim_job(worker, params.id, rt.curr_epoch())) + } + + pub fn complete_job( + rt: &impl Runtime, + params: CompleteJobParams, + ) -> Result { + rt.validate_immediate_caller_accept_any()?; + let worker = rt.message().caller(); + rt.transaction(|st: &mut State, rt| st.complete_job(worker, params.clone(), rt.curr_epoch())) + } + + pub fn fail_job(rt: &impl Runtime, params: FailJobParams) -> Result { + rt.validate_immediate_caller_accept_any()?; + let worker = rt.message().caller(); + rt.transaction(|st: &mut State, rt| st.fail_job(worker, params.clone(), rt.curr_epoch())) + } + + pub fn get_job( + rt: &impl Runtime, + params: GetJobParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + let state = rt.state::()?; + Ok(state.get_job(params.id)) + } + + pub fn list_jobs(rt: &impl Runtime, params: ListJobsParams) -> Result { + rt.validate_immediate_caller_accept_any()?; + let state = rt.state::()?; + Ok(state.list_jobs(params)) + } +} diff --git a/fendermint/actors/blobs/src/sol_facade/blobs.rs b/fendermint/actors/blobs/src/sol_facade/blobs.rs index 4f2426f337..165b8f261a 100644 --- a/fendermint/actors/blobs/src/sol_facade/blobs.rs +++ b/fendermint/actors/blobs/src/sol_facade/blobs.rs @@ -4,10 +4,15 @@ use fendermint_actor_blobs_shared::{ blobs::{ - AddBlobParams, Blob, BlobStatus, DeleteBlobParams, GetBlobParams, OverwriteBlobParams, - TrimBlobExpiriesParams, + AddBlobParams, Blob, BlobStatus, DeleteBlobParams, FinalizeBlobParams, GetBlobParams, + OverwriteBlobParams, SubscriptionId, TrimBlobExpiriesParams, }, bytes::B256, + execution::{ + ClaimJobParams, CompleteJobParams, CreateJobParams, FailJobParams, CLAIM_JOB_SELECTOR, + COMPLETE_JOB_SELECTOR, 
CREATE_JOB_SELECTOR, FAIL_JOB_SELECTOR, + }, + operators::OperatorInfo, GetStatsReturn, }; use fil_actors_runtime::{actor_error, runtime::Runtime, ActorError}; @@ -112,6 +117,606 @@ pub fn parse_input(input: &ipc_storage_actor_sdk::evm::InputData) -> Result, + pub rpc_url: String, +} + +pub struct GetOperatorInfoInvokeCall { + pub address: Address, +} + +pub struct CreateJobInvokeCall { + pub binary_ref: String, + pub input_refs: Vec, + pub args: Vec, + pub env: Vec<(String, String)>, + pub timeout_secs: u64, +} + +pub struct ClaimJobInvokeCall { + pub job_id: u64, +} + +pub struct CompleteJobInvokeCall { + pub job_id: u64, + pub output_refs: Vec, + pub output_commitment: B256, + pub exit_code: i32, +} + +pub struct FailJobInvokeCall { + pub job_id: u64, + pub error: String, + pub exit_code: i32, +} + +pub fn is_register_node_operator_call(input: &ipc_storage_actor_sdk::evm::InputData) -> bool { + input.selector() == REGISTER_NODE_OPERATOR_SELECTOR +} + +pub fn is_get_operator_info_call(input: &ipc_storage_actor_sdk::evm::InputData) -> bool { + input.selector() == GET_OPERATOR_INFO_SELECTOR +} + +pub fn is_get_active_operators_call(input: &ipc_storage_actor_sdk::evm::InputData) -> bool { + input.selector() == GET_ACTIVE_OPERATORS_SELECTOR +} + +pub fn is_create_job_call(input: &ipc_storage_actor_sdk::evm::InputData) -> bool { + input.selector() == CREATE_JOB_SELECTOR +} + +pub fn is_claim_job_call(input: &ipc_storage_actor_sdk::evm::InputData) -> bool { + input.selector() == CLAIM_JOB_SELECTOR +} + +pub fn is_complete_job_call(input: &ipc_storage_actor_sdk::evm::InputData) -> bool { + input.selector() == COMPLETE_JOB_SELECTOR +} + +pub fn is_fail_job_call(input: &ipc_storage_actor_sdk::evm::InputData) -> bool { + input.selector() == FAIL_JOB_SELECTOR +} + +pub fn is_finalize_blob_call(input: &ipc_storage_actor_sdk::evm::InputData) -> bool { + input.selector() == FINALIZE_BLOB_SELECTOR +} + +pub fn parse_register_node_operator_input( + input: 
&ipc_storage_actor_sdk::evm::InputData, +) -> Result { + let calldata = input.calldata(); + if calldata.len() < 64 { + return Err(actor_error!(illegal_argument, "invalid call: input too short")); + } + + let bls_offset = decode_offset(calldata, 0)?; + let rpc_offset = decode_offset(calldata, 32)?; + + let bls_pubkey = decode_dynamic_bytes(calldata, bls_offset)?; + let rpc_bytes = decode_dynamic_bytes(calldata, rpc_offset)?; + let rpc_url = String::from_utf8(rpc_bytes) + .map_err(|e| actor_error!(illegal_argument, format!("invalid call: bad UTF-8: {}", e)))?; + + Ok(RegisterNodeOperatorInvokeCall { + bls_pubkey, + rpc_url, + }) +} + +pub fn parse_get_operator_info_input( + input: &ipc_storage_actor_sdk::evm::InputData, +) -> Result { + let calldata = input.calldata(); + if calldata.len() < 32 { + return Err(actor_error!(illegal_argument, "invalid call: input too short")); + } + let word = &calldata[0..32]; + if word[..12].iter().any(|b| *b != 0) { + return Err(actor_error!( + illegal_argument, + "invalid call: malformed address" + )); + } + let address: Address = H160::from_slice(&word[12..32]).into(); + Ok(GetOperatorInfoInvokeCall { address }) +} + +pub fn parse_create_job_input( + input: &ipc_storage_actor_sdk::evm::InputData, +) -> Result { + let calldata = input.calldata(); + if calldata.len() < 32 * 5 { + return Err(actor_error!(illegal_argument, "invalid call: input too short")); + } + + let binary_ref = decode_dynamic_string(calldata, decode_offset(calldata, 0)?)?; + let input_refs = decode_dynamic_string_array(calldata, decode_offset(calldata, 32)?)?; + let args = decode_dynamic_string_array(calldata, decode_offset(calldata, 64)?)?; + let env_raw = decode_dynamic_string_array(calldata, decode_offset(calldata, 96)?)?; + let timeout_secs = decode_u64_word(calldata, 128)?; + + let mut env = Vec::with_capacity(env_raw.len()); + for entry in env_raw { + let (k, v) = entry.split_once('=').ok_or_else(|| { + actor_error!( + illegal_argument, + "invalid call: env 
entries must be formatted as KEY=VALUE" + ) + })?; + env.push((k.to_string(), v.to_string())); + } + + Ok(CreateJobInvokeCall { + binary_ref, + input_refs, + args, + env, + timeout_secs, + }) +} + +pub fn parse_claim_job_input( + input: &ipc_storage_actor_sdk::evm::InputData, +) -> Result { + let calldata = input.calldata(); + if calldata.len() < 32 { + return Err(actor_error!(illegal_argument, "invalid call: input too short")); + } + Ok(ClaimJobInvokeCall { + job_id: decode_u64_word(calldata, 0)?, + }) +} + +pub fn parse_complete_job_input( + input: &ipc_storage_actor_sdk::evm::InputData, +) -> Result { + let calldata = input.calldata(); + if calldata.len() < 32 * 4 { + return Err(actor_error!(illegal_argument, "invalid call: input too short")); + } + + let job_id = decode_u64_word(calldata, 0)?; + let output_refs = decode_dynamic_string_array(calldata, decode_offset(calldata, 32)?)?; + let output_commitment = decode_b256_word(calldata, 64)?; + let exit_code = decode_i32_word(calldata, 96)?; + + Ok(CompleteJobInvokeCall { + job_id, + output_refs, + output_commitment, + exit_code, + }) +} + +pub fn parse_fail_job_input( + input: &ipc_storage_actor_sdk::evm::InputData, +) -> Result { + let calldata = input.calldata(); + if calldata.len() < 32 * 3 { + return Err(actor_error!(illegal_argument, "invalid call: input too short")); + } + + let job_id = decode_u64_word(calldata, 0)?; + let error = decode_dynamic_string(calldata, decode_offset(calldata, 32)?)?; + let exit_code = decode_i32_word(calldata, 64)?; + + Ok(FailJobInvokeCall { + job_id, + error, + exit_code, + }) +} + +/// Parses ABI-encoded calldata for `finalizeBlob(bytes32,address,bytes32,uint64,string,uint8,bytes,uint128)`. 
+pub fn parse_finalize_blob_input( + input: &ipc_storage_actor_sdk::evm::InputData, + rt: &impl Runtime, +) -> Result { + let calldata = input.calldata(); + // 8 head slots: source(32) + subscriber(32) + blobHash(32) + size(32) + // + string_offset(32) + status(32) + bytes_offset(32) + signerBitmap(32) + if calldata.len() < 32 * 8 { + return Err(actor_error!( + illegal_argument, + "invalid finalizeBlob call: input too short" + )); + } + + let source = decode_b256_word(calldata, 0)?; + let subscriber_h160 = decode_address_word(calldata, 32)?; + let subscriber: Address = subscriber_h160.into(); + let subscriber = rt + .resolve_address(&subscriber) + .map(Address::new_id) + .unwrap_or(subscriber); + let hash = decode_b256_word(calldata, 64)?; + let size = decode_u64_word(calldata, 96)?; + let subscription_id_str = decode_dynamic_string(calldata, decode_offset(calldata, 128)?)?; + let subscription_id: SubscriptionId = subscription_id_str.try_into().map_err(|e| { + actor_error!( + illegal_argument, + format!("invalid finalizeBlob call: bad subscription id: {}", e) + ) + })?; + let status_u8 = decode_u8_word(calldata, 160)?; + let status = solidity_enum_to_blob_status(status_u8)?; + let aggregated_signature = decode_dynamic_bytes(calldata, decode_offset(calldata, 192)?)?; + let signer_bitmap = decode_u128_word(calldata, 224)?; + + Ok(FinalizeBlobParams { + source, + subscriber, + hash, + size, + id: subscription_id, + status, + aggregated_signature, + signer_bitmap, + }) +} + +impl From for CreateJobParams { + fn from(value: CreateJobInvokeCall) -> Self { + CreateJobParams { + binary_ref: value.binary_ref, + input_refs: value.input_refs, + args: value.args, + env: value.env, + timeout_secs: value.timeout_secs, + } + } +} + +impl From for ClaimJobParams { + fn from(value: ClaimJobInvokeCall) -> Self { + ClaimJobParams { id: value.job_id } + } +} + +impl From for CompleteJobParams { + fn from(value: CompleteJobInvokeCall) -> Self { + CompleteJobParams { + id: value.job_id, 
+ output_refs: value.output_refs, + output_commitment: value.output_commitment, + exit_code: value.exit_code, + } + } +} + +impl From for FailJobParams { + fn from(value: FailJobInvokeCall) -> Self { + FailJobParams { + id: value.job_id, + reason: value.error, + exit_code: value.exit_code, + } + } +} + +pub fn encode_get_operator_info_output(info: Option) -> Result, ActorError> { + let (bls_pubkey, rpc_url, active) = if let Some(info) = info { + (info.bls_pubkey, info.rpc_url.into_bytes(), info.active) + } else { + (Vec::new(), Vec::new(), false) + }; + + let bls_section = encode_dynamic_bytes(&bls_pubkey); + let rpc_section = encode_dynamic_bytes(&rpc_url); + + let head_size = 32 * 3; + let bls_offset = head_size; + let rpc_offset = head_size + bls_section.len(); + + let mut output = Vec::with_capacity(head_size + bls_section.len() + rpc_section.len()); + output.extend_from_slice(&abi_word_from_usize(bls_offset)); + output.extend_from_slice(&abi_word_from_usize(rpc_offset)); + output.extend_from_slice(&abi_word_from_bool(active)); + output.extend_from_slice(&bls_section); + output.extend_from_slice(&rpc_section); + Ok(output) +} + +pub fn encode_get_active_operators_output(operators: Vec
) -> Result, ActorError> { + let mut operators_section = Vec::with_capacity(32 + operators.len() * 32); + operators_section.extend_from_slice(&abi_word_from_usize(operators.len())); + for operator in operators { + let h160 = H160::try_from(operator).map_err(|e| { + actor_error!( + illegal_argument, + format!("failed to encode operator address: {}", e) + ) + })?; + operators_section.extend_from_slice(&abi_word_from_address(h160)); + } + + let mut output = Vec::with_capacity(32 + operators_section.len()); + output.extend_from_slice(&abi_word_from_usize(32)); + output.extend_from_slice(&operators_section); + Ok(output) +} + +fn decode_offset(calldata: &[u8], at: usize) -> Result { + let end = at + 32; + if end > calldata.len() { + return Err(actor_error!(illegal_argument, "invalid call: malformed offset")); + } + let word = &calldata[at..end]; + if word[..24].iter().any(|b| *b != 0) { + return Err(actor_error!( + illegal_argument, + "invalid call: offset too large" + )); + } + let mut n = [0u8; 8]; + n.copy_from_slice(&word[24..32]); + Ok(u64::from_be_bytes(n) as usize) +} + +fn decode_dynamic_bytes(calldata: &[u8], offset: usize) -> Result, ActorError> { + if offset + 32 > calldata.len() { + return Err(actor_error!( + illegal_argument, + "invalid call: dynamic offset out of bounds" + )); + } + + let len = decode_offset(calldata, offset)?; + let start = offset + 32; + let end = start + .checked_add(len) + .ok_or_else(|| actor_error!(illegal_argument, "invalid call: overflow"))?; + + if end > calldata.len() { + return Err(actor_error!( + illegal_argument, + "invalid call: dynamic value out of bounds" + )); + } + + Ok(calldata[start..end].to_vec()) +} + +fn decode_dynamic_string(calldata: &[u8], offset: usize) -> Result { + let bytes = decode_dynamic_bytes(calldata, offset)?; + String::from_utf8(bytes) + .map_err(|e| actor_error!(illegal_argument, format!("invalid call: bad UTF-8: {}", e))) +} + +fn decode_dynamic_string_array(calldata: &[u8], offset: usize) -> Result, 
ActorError> { + if offset + 32 > calldata.len() { + return Err(actor_error!( + illegal_argument, + "invalid call: dynamic offset out of bounds" + )); + } + + let count = decode_offset(calldata, offset)?; + let head_start = offset + 32; + let head_size = count + .checked_mul(32) + .ok_or_else(|| actor_error!(illegal_argument, "invalid call: overflow"))?; + let head_end = head_start + .checked_add(head_size) + .ok_or_else(|| actor_error!(illegal_argument, "invalid call: overflow"))?; + + if head_end > calldata.len() { + return Err(actor_error!( + illegal_argument, + "invalid call: array head out of bounds" + )); + } + + let mut out = Vec::with_capacity(count); + for i in 0..count { + let relative = decode_offset(calldata, head_start + i * 32)?; + let string_offset = head_start + .checked_add(relative) + .ok_or_else(|| actor_error!(illegal_argument, "invalid call: overflow"))?; + out.push(decode_dynamic_string(calldata, string_offset)?); + } + Ok(out) +} + +fn decode_u64_word(calldata: &[u8], at: usize) -> Result { + let end = at + 32; + if end > calldata.len() { + return Err(actor_error!(illegal_argument, "invalid call: malformed word")); + } + let word = &calldata[at..end]; + if word[..24].iter().any(|b| *b != 0) { + return Err(actor_error!(illegal_argument, "invalid call: integer too large")); + } + let mut n = [0u8; 8]; + n.copy_from_slice(&word[24..32]); + Ok(u64::from_be_bytes(n)) +} + +fn decode_i32_word(calldata: &[u8], at: usize) -> Result { + let end = at + 32; + if end > calldata.len() { + return Err(actor_error!(illegal_argument, "invalid call: malformed word")); + } + let word = &calldata[at..end]; + let mut n = [0u8; 4]; + n.copy_from_slice(&word[28..32]); + Ok(i32::from_be_bytes(n)) +} + +fn decode_b256_word(calldata: &[u8], at: usize) -> Result { + let end = at + 32; + if end > calldata.len() { + return Err(actor_error!(illegal_argument, "invalid call: malformed word")); + } + let mut out = [0u8; 32]; + out.copy_from_slice(&calldata[at..end]); + 
Ok(B256(out)) +} + +fn decode_address_word(calldata: &[u8], at: usize) -> Result { + let end = at + 32; + if end > calldata.len() { + return Err(actor_error!( + illegal_argument, + "invalid call: malformed address word" + )); + } + let word = &calldata[at..end]; + if word[..12].iter().any(|b| *b != 0) { + return Err(actor_error!( + illegal_argument, + "invalid call: malformed address" + )); + } + Ok(H160::from_slice(&word[12..32])) +} + +fn decode_u8_word(calldata: &[u8], at: usize) -> Result { + let end = at + 32; + if end > calldata.len() { + return Err(actor_error!(illegal_argument, "invalid call: malformed word")); + } + let word = &calldata[at..end]; + if word[..31].iter().any(|b| *b != 0) { + return Err(actor_error!( + illegal_argument, + "invalid call: uint8 value too large" + )); + } + Ok(word[31]) +} + +fn decode_u128_word(calldata: &[u8], at: usize) -> Result { + let end = at + 32; + if end > calldata.len() { + return Err(actor_error!(illegal_argument, "invalid call: malformed word")); + } + let word = &calldata[at..end]; + if word[..16].iter().any(|b| *b != 0) { + return Err(actor_error!( + illegal_argument, + "invalid call: uint128 value too large" + )); + } + let mut n = [0u8; 16]; + n.copy_from_slice(&word[16..32]); + Ok(u128::from_be_bytes(n)) +} + +fn solidity_enum_to_blob_status(value: u8) -> Result { + match value { + 0 => Ok(BlobStatus::Added), + 1 => Ok(BlobStatus::Pending), + 2 => Ok(BlobStatus::Resolved), + 3 => Ok(BlobStatus::Failed), + _ => Err(actor_error!( + illegal_argument, + format!("invalid BlobStatus enum value: {}", value) + )), + } +} + +fn abi_word_from_usize(value: usize) -> [u8; 32] { + let mut word = [0u8; 32]; + word[24..32].copy_from_slice(&(value as u64).to_be_bytes()); + word +} + +fn abi_word_from_bool(value: bool) -> [u8; 32] { + let mut word = [0u8; 32]; + word[31] = u8::from(value); + word +} + +fn abi_word_from_address(value: H160) -> [u8; 32] { + let mut word = [0u8; 32]; + 
word[12..32].copy_from_slice(&value.to_fixed_bytes()); + word +} + +fn encode_dynamic_bytes(value: &[u8]) -> Vec { + let mut out = Vec::with_capacity(32 + padded_32_len(value.len())); + out.extend_from_slice(&abi_word_from_usize(value.len())); + out.extend_from_slice(value); + let padding = padded_32_len(value.len()) - value.len(); + out.extend(std::iter::repeat(0u8).take(padding)); + out +} + +fn padded_32_len(size: usize) -> usize { + if size == 0 { 0 } else { size.div_ceil(32) * 32 } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn address_word(bytes20: [u8; 20]) -> [u8; 32] { + let mut word = [0u8; 32]; + word[12..32].copy_from_slice(&bytes20); + word + } + + #[test] + fn parses_get_operator_info_input_address() { + let addr = [0x11u8; 20]; + let mut input = Vec::new(); + input.extend_from_slice(&GET_OPERATOR_INFO_SELECTOR); + input.extend_from_slice(&address_word(addr)); + let input = + ipc_storage_actor_sdk::evm::InputData::try_from(ipc_storage_actor_sdk::evm::InvokeContractParams { + input_data: input, + }) + .expect("valid input"); + + let parsed = parse_get_operator_info_input(&input).expect("parse succeeds"); + let expected = Address::new_delegated(10, &addr).expect("delegated"); + assert_eq!(parsed.address, expected); + } + + #[test] + fn encodes_get_active_operators_output_as_address_array() { + let id = Address::new_id(66); + let delegated = Address::new_delegated(10, &[0x22; 20]).expect("delegated"); + + let encoded = encode_get_active_operators_output(vec![id, delegated]).expect("encode"); + + assert_eq!(&encoded[0..32], &abi_word_from_usize(32)); + assert_eq!(&encoded[32..64], &abi_word_from_usize(2)); + + let id_h160 = H160::try_from(id).expect("id to h160"); + let delegated_h160 = H160::try_from(delegated).expect("delegated to h160"); + assert_eq!(&encoded[64..96], &abi_word_from_address(id_h160)); + assert_eq!(&encoded[96..128], &abi_word_from_address(delegated_h160)); + } + + #[test] + fn encodes_get_operator_info_output_tuple() { + let 
info = OperatorInfo { + bls_pubkey: vec![1, 2, 3, 4], + rpc_url: "http://127.0.0.1:8081".to_string(), + active: true, + }; + let encoded = encode_get_operator_info_output(Some(info)).expect("encode"); + + assert_eq!(&encoded[0..32], &abi_word_from_usize(96)); + let bls_section_len = 32 + 32; // len + padded data for 4 bytes + assert_eq!( + &encoded[32..64], + &abi_word_from_usize(96 + bls_section_len) + ); + assert_eq!(&encoded[64..96], &abi_word_from_bool(true)); + } +} + fn blob_status_as_solidity_enum(blob_status: BlobStatus) -> u8 { match blob_status { BlobStatus::Added => 0, diff --git a/fendermint/actors/blobs/src/state.rs b/fendermint/actors/blobs/src/state.rs index b09747d2a7..d6b1d3eae6 100644 --- a/fendermint/actors/blobs/src/state.rs +++ b/fendermint/actors/blobs/src/state.rs @@ -12,11 +12,13 @@ use fvm_shared::econ::TokenAmount; pub mod accounts; pub mod blobs; pub mod credit; +pub mod execution; pub mod operators; use accounts::Accounts; use blobs::{Blobs, DeleteBlobStateParams}; use credit::Credits; +use execution::ExecutionState; use operators::Operators; /// The state represents all accounts and stored blobs. @@ -30,6 +32,8 @@ pub struct State { pub blobs: Blobs, /// Registry of node operators for blob storage. pub operators: Operators, + /// Minimal execution jobs state (MVP). 
+ pub execution: ExecutionState, } impl State { @@ -40,6 +44,7 @@ impl State { accounts: Accounts::new(store)?, blobs: Blobs::new(store)?, operators: Operators::new(store)?, + execution: ExecutionState::default(), }) } diff --git a/fendermint/actors/blobs/src/state/execution.rs b/fendermint/actors/blobs/src/state/execution.rs new file mode 100644 index 0000000000..4828cf6e9d --- /dev/null +++ b/fendermint/actors/blobs/src/state/execution.rs @@ -0,0 +1,165 @@ +// Copyright 2026 Recall Contributors +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::execution::{ + CompleteJobParams, CreateJobParams, ExecutionJob, FailJobParams, JobStatus, ListJobsParams, + ListJobsReturn, +}; +use fil_actors_runtime::ActorError; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch}; + +use crate::State; + +#[derive(Debug, Default, Serialize_tuple, Deserialize_tuple)] +pub struct ExecutionState { + pub next_job_id: u64, + pub jobs: Vec, +} + +impl State { + pub fn create_job( + &mut self, + creator: Address, + params: CreateJobParams, + epoch: ChainEpoch, + ) -> Result { + if params.binary_ref.is_empty() { + return Err(ActorError::illegal_argument("binary_ref cannot be empty".into())); + } + if params.timeout_secs == 0 { + return Err(ActorError::illegal_argument("timeout_secs must be > 0".into())); + } + + let id = self.execution.next_job_id; + self.execution.next_job_id += 1; + + let job = ExecutionJob { + id, + creator, + claimed_by: None, + status: JobStatus::Pending, + binary_ref: params.binary_ref, + input_refs: params.input_refs, + args: params.args, + env: params.env, + timeout_secs: params.timeout_secs, + created_epoch: epoch, + started_epoch: None, + completed_epoch: None, + output_refs: Vec::new(), + output_commitment: None, + exit_code: None, + error: None, + }; + + self.execution.jobs.push(job.clone()); + Ok(job) + } + + pub fn claim_job(&mut self, worker: Address, id: u64, epoch: ChainEpoch) -> Result { + 
let job = self + .execution + .jobs + .iter_mut() + .find(|j| j.id == id) + .ok_or_else(|| ActorError::not_found(format!("job {} not found", id)))?; + + if job.status != JobStatus::Pending { + return Err(ActorError::illegal_state(format!( + "job {} is not pending (status: {:?})", + id, job.status + ))); + } + + job.status = JobStatus::Claimed; + job.claimed_by = Some(worker); + job.started_epoch = Some(epoch); + Ok(job.clone()) + } + + pub fn complete_job( + &mut self, + worker: Address, + params: CompleteJobParams, + epoch: ChainEpoch, + ) -> Result { + let job = self + .execution + .jobs + .iter_mut() + .find(|j| j.id == params.id) + .ok_or_else(|| ActorError::not_found(format!("job {} not found", params.id)))?; + + if job.claimed_by != Some(worker) { + return Err(ActorError::forbidden("only claiming worker can complete job".into())); + } + if !(job.status == JobStatus::Claimed || job.status == JobStatus::Running) { + return Err(ActorError::illegal_state(format!( + "job {} is not claim/running (status: {:?})", + params.id, job.status + ))); + } + + job.status = JobStatus::Succeeded; + job.completed_epoch = Some(epoch); + job.output_refs = params.output_refs; + job.output_commitment = Some(params.output_commitment); + job.exit_code = Some(params.exit_code); + job.error = None; + Ok(job.clone()) + } + + pub fn fail_job( + &mut self, + worker: Address, + params: FailJobParams, + epoch: ChainEpoch, + ) -> Result { + let job = self + .execution + .jobs + .iter_mut() + .find(|j| j.id == params.id) + .ok_or_else(|| ActorError::not_found(format!("job {} not found", params.id)))?; + + if job.claimed_by != Some(worker) { + return Err(ActorError::forbidden("only claiming worker can fail job".into())); + } + if !(job.status == JobStatus::Claimed || job.status == JobStatus::Running) { + return Err(ActorError::illegal_state(format!( + "job {} is not claim/running (status: {:?})", + params.id, job.status + ))); + } + + job.status = JobStatus::Failed; + job.completed_epoch = 
Some(epoch); + job.exit_code = Some(params.exit_code); + job.error = Some(params.reason); + Ok(job.clone()) + } + + pub fn get_job(&self, id: u64) -> Option { + self.execution.jobs.iter().find(|j| j.id == id).cloned() + } + + pub fn list_jobs(&self, params: ListJobsParams) -> ListJobsReturn { + let limit = if params.limit == 0 { 100 } else { params.limit as usize }; + let mut jobs: Vec = self + .execution + .jobs + .iter() + .filter(|job| { + params + .status + .as_ref() + .is_none_or(|status| &job.status == status) + }) + .take(limit) + .cloned() + .collect(); + jobs.shrink_to_fit(); + ListJobsReturn { jobs } + } +} diff --git a/fendermint/rpc/src/message.rs b/fendermint/rpc/src/message.rs index 686c81dbe3..284b70e4f9 100644 --- a/fendermint/rpc/src/message.rs +++ b/fendermint/rpc/src/message.rs @@ -195,6 +195,11 @@ impl SignedMessageFactory { self.inner.address() } + /// Set the sequence to an arbitrary value, e.g. after resyncing from chain state. + pub fn set_sequence(&mut self, sequence: u64) { + self.inner.set_sequence(sequence); + } + /// Transfer tokens to another account. pub fn transfer( &mut self, diff --git a/fendermint/vm/interpreter/src/fvm/legacy_topdown.rs b/fendermint/vm/interpreter/src/fvm/legacy_topdown.rs index eb4adb9aed..fabd8764d3 100644 --- a/fendermint/vm/interpreter/src/fvm/legacy_topdown.rs +++ b/fendermint/vm/interpreter/src/fvm/legacy_topdown.rs @@ -47,6 +47,21 @@ impl LegacyTopDownHandler { height: finality.height as u64, block_hash: finality.block_hash, }; + let quorum_threshold = atomically(|| self.votes.quorum_threshold()).await; + + // In a single-validator subnet, self-attestation should not depend on local + // cache catch-up. Otherwise aggressive proposals can be rejected by the same + // node that created them, stalling consensus at one height. 
+ if quorum_threshold == 1 { + let committed_height = atomically(|| { + self.provider + .last_committed_finality() + .map(|f| f.map(|f| f.height).unwrap_or_default()) + }) + .await; + return prop.height > committed_height; + } + atomically(|| self.provider.check_proposal(&prop)).await } @@ -103,7 +118,7 @@ impl LegacyTopDownHandler { async fn chain_message_from_finality_or_quorum(&self) -> Option { atomically(|| self.votes.pause_votes_until_find_quorum()).await; - let (parent, quorum) = atomically(|| { + let (parent, quorum, quorum_threshold) = atomically(|| { let parent = self.provider.next_proposal()?; let quorum = self @@ -111,29 +126,93 @@ impl LegacyTopDownHandler { .find_quorum()? .map(|(height, block_hash)| IPCParentFinality { height, block_hash }); - Ok((parent, quorum)) + let quorum_threshold = self.votes.quorum_threshold()?; + + Ok((parent, quorum, quorum_threshold)) }) .await; let parent = parent?; - let quorum = if let Some(quorum) = quorum { - quorum - } else { - emit!( - DEBUG, - ParentFinalityMissingQuorum { - block_height: parent.height, - block_hash: &hex::encode(&parent.block_hash), - } - ); - return None; - }; - - let finality = if parent.height <= quorum.height { - parent + // Fast-path for single-validator subnets: quorum threshold is 1, so requiring + // a separate vote-derived quorum can unnecessarily throttle catch-up. + // + // In addition, bypass `next_proposal()` bounds (`max_proposal_range`, `proposal_delay`) + // and favor the freshest finalized parent view queried directly from parent RPC + // (`chain_head - chain_head_delay`). This avoids being throttled by local cache + // catch-up speed when the node is far behind. 
+ let finality = if quorum_threshold == 1 { + let committed_height = atomically(|| { + self.provider + .last_committed_finality() + .map(|f| f.map(|f| f.height).unwrap_or_default()) + }) + .await; + + let remote_finalized = self + .provider + .latest_finalized_parent_view() + .await + .ok() + .flatten(); + let candidate = if let Some(remote_finalized) = + remote_finalized.filter(|f| f.height > committed_height) + { + remote_finalized + } else { + let latest_non_null = atomically(|| { + let latest = self.provider.latest_height()?; + let committed = self.provider.last_committed_finality()?; + + let (latest, committed_height) = match (latest, committed) { + (Some(latest), Some(committed)) => (latest, committed.height), + _ => return Ok(None), + }; + + if latest <= committed_height { + return Ok(None); + } + + let latest_non_null = self + .provider + .first_non_null_block(latest)? + .filter(|h| *h > committed_height); + + let Some(height) = latest_non_null else { + return Ok(None); + }; + + let Some(block_hash) = self.provider.block_hash(height)? 
else { + return Ok(None); + }; + + Ok(Some(IPCParentFinality { height, block_hash })) + }) + .await; + + latest_non_null.unwrap_or(parent) + }; + + candidate } else { - quorum + let quorum = if let Some(quorum) = quorum { + quorum + } else { + emit!( + DEBUG, + ParentFinalityMissingQuorum { + block_height: parent.height, + block_hash: &hex::encode(&parent.block_hash), + } + ); + return None; + }; + + if parent.height <= quorum.height { + parent + } else { + quorum + } }; Some(ChainMessage::Ipc(IpcMessage::TopDownExec(ParentFinality { diff --git a/fendermint/vm/interpreter/src/fvm/topdown.rs b/fendermint/vm/interpreter/src/fvm/topdown.rs index 5970afae88..4caf3f97cf 100644 --- a/fendermint/vm/interpreter/src/fvm/topdown.rs +++ b/fendermint/vm/interpreter/src/fvm/topdown.rs @@ -20,6 +20,9 @@ use crate::types::AppliedMessage; use ipc_api::cross::IpcEnvelope; use ipc_observability::emit; +/// Maximum inclusive parent-height span processed per legacy topdown chunk. +const LEGACY_TOPDOWN_CHUNK_SIZE: BlockHeight = 200; + #[derive(Clone, Debug)] pub struct F3ExecutionCacheRetryConfig { pub backoff_initial: std::time::Duration, @@ -399,41 +402,75 @@ where // be _at least_ 1 height behind. 
let (execution_fr, execution_to) = (prev_height + 1, finality.height); - // error happens if we cannot get the validator set from ipc agent after retries - let validator_changes = legacy - .validator_changes_from(execution_fr, execution_to) - .await - .context("failed to fetch validator changes")?; - - tracing::debug!( - from = execution_fr, - to = execution_to, - msgs = validator_changes.len(), - "chain interpreter received total validator changes" - ); + let mut total_validator_changes = 0usize; + let mut total_topdown_msgs = 0usize; + let mut ret: Option = None; + let mut chunk_start = execution_fr; + while chunk_start <= execution_to { + let chunk_end = chunk_start + .saturating_add(LEGACY_TOPDOWN_CHUNK_SIZE.saturating_sub(1)) + .min(execution_to); + + // error happens if we cannot get the validator set from ipc agent after retries + let validator_changes = legacy + .validator_changes_from(chunk_start, chunk_end) + .await + .context("failed to fetch validator changes")?; + total_validator_changes += validator_changes.len(); + + tracing::debug!( + chunk_start, + chunk_end, + changes = validator_changes.len(), + "chain interpreter received validator changes chunk" + ); - self.inner - .gateway_caller - .store_validator_changes(state, validator_changes) - .context("failed to store validator changes")?; + self.inner + .gateway_caller + .store_validator_changes(state, validator_changes) + .context("failed to store validator changes")?; + + // error happens if we cannot get the cross messages from ipc agent after retries + let msgs = legacy + .top_down_msgs_from(chunk_start, chunk_end) + .await + .context("failed to fetch top down messages")?; + total_topdown_msgs += msgs.len(); + + tracing::debug!( + chunk_start, + chunk_end, + number_of_messages = msgs.len(), + "chain interpreter received topdown messages chunk", + ); + + if !msgs.is_empty() { + ret = Some( + self.execute_topdown_msgs(state, msgs) + .await + .context("failed to execute top down messages")?, + ); + } 
- // error happens if we cannot get the cross messages from ipc agent after retries - let msgs = legacy - .top_down_msgs_from(execution_fr, execution_to) - .await - .context("failed to fetch top down messages")?; + chunk_start = chunk_end.saturating_add(1); + } tracing::debug!( - number_of_messages = msgs.len(), start = execution_fr, end = execution_to, - "chain interpreter received topdown msgs", + total_validator_changes, + total_topdown_msgs, + "chain interpreter processed topdown effects in chunks", ); - let ret = self - .execute_topdown_msgs(state, msgs) - .await - .context("failed to execute top down messages")?; + // Preserve previous behavior: return an AppliedMessage even when there are no topdown msgs. + let ret = if let Some(ret) = ret { + ret + } else { + self.execute_topdown_msgs(state, Vec::::new()) + .await + .context("failed to execute empty top down messages batch")? + }; tracing::debug!("chain interpreter applied topdown msgs"); diff --git a/fendermint/vm/topdown/src/finality/fetch.rs b/fendermint/vm/topdown/src/finality/fetch.rs index 66ff519c9d..29dcdc69b9 100644 --- a/fendermint/vm/topdown/src/finality/fetch.rs +++ b/fendermint/vm/topdown/src/finality/fetch.rs @@ -72,18 +72,21 @@ impl ParentViewProvider for CachedF from: BlockHeight, to: BlockHeight, ) -> anyhow::Result> { - let mut v = vec![]; - for h in from..=to { - let mut r = self.validator_changes(h).await?; - tracing::debug!( - number_of_messages = r.len(), - height = h, - "fetched validator change set", - ); - v.append(&mut r); - } - - Ok(v) + let r = retry!( + self.config.exponential_back_off, + self.config.exponential_retry_limit, + self.parent_client + .get_validator_changes_range(from, to) + .await + ); + let changes = handle_null_round(r, Vec::new)?; + tracing::debug!( + from, + to, + number_of_messages = changes.len(), + "fetched validator change set range", + ); + Ok(changes) } /// Get top down message in the range `from` to `to`, both inclusive. 
For the check to be valid, one @@ -94,17 +97,19 @@ impl ParentViewProvider for CachedF from: BlockHeight, to: BlockHeight, ) -> anyhow::Result> { - let mut v = vec![]; - for h in from..=to { - let mut r = self.top_down_msgs(h).await?; - tracing::debug!( - number_of_top_down_messages = r.len(), - height = h, - "obtained topdown messages", - ); - v.append(&mut r); - } - Ok(v) + let r = retry!( + self.config.exponential_back_off, + self.config.exponential_retry_limit, + self.parent_client.get_top_down_msgs_range(from, to).await + ); + let msgs = handle_null_round(r, Vec::new)?; + tracing::debug!( + from, + to, + number_of_top_down_messages = msgs.len(), + "obtained topdown messages range", + ); + Ok(msgs) } } @@ -125,6 +130,35 @@ impl ParentFinalityProvider } impl CachedFinalityProvider { + /// Query parent RPC directly for the latest non-null finalized parent view, + /// independent from local cache catch-up speed. + pub async fn latest_finalized_parent_view(&self) -> anyhow::Result> { + let chain_head = self.parent_client.get_chain_head_height().await?; + if chain_head < self.config.chain_head_delay { + return Ok(None); + } + + // Align to the same finality window as the syncer. + let mut height = chain_head - self.config.chain_head_delay; + loop { + match self.parent_client.get_block_hash(height).await { + Ok(res) => { + return Ok(Some(IPCParentFinality { + height, + block_hash: res.block_hash, + })); + } + Err(e) if crate::is_null_round_str(&e.to_string()) => { + if height == 0 { + return Ok(None); + } + height -= 1; + } + Err(e) => return Err(e), + } + } + } + /// Creates an uninitialized provider /// We need this because `fendermint` has yet to be initialized and might /// not be able to provide an existing finality from the storage. 
This provider requires an @@ -134,50 +168,6 @@ impl CachedFinalityProvider { Ok(Self::new(config, genesis, None, parent_client)) } - /// Should always return the top down messages, only when ipc parent_client is down after exponential - /// retries - async fn validator_changes( - &self, - height: BlockHeight, - ) -> anyhow::Result> { - let r = self.inner.validator_changes(height).await?; - - if let Some(v) = r { - return Ok(v); - } - - let r = retry!( - self.config.exponential_back_off, - self.config.exponential_retry_limit, - self.parent_client - .get_validator_changes(height) - .await - .map(|r| r.value) - ); - - handle_null_round(r, Vec::new) - } - - /// Should always return the top down messages, only when ipc parent_client is down after exponential - /// retries - async fn top_down_msgs(&self, height: BlockHeight) -> anyhow::Result> { - let r = self.inner.top_down_msgs(height).await?; - - if let Some(v) = r { - return Ok(v); - } - - let r = retry!( - self.config.exponential_back_off, - self.config.exponential_retry_limit, - self.parent_client - .get_top_down_msgs(height) - .await - .map(|r| r.value) - ); - - handle_null_round(r, Vec::new) - } } impl CachedFinalityProvider { diff --git a/fendermint/vm/topdown/src/finality/mod.rs b/fendermint/vm/topdown/src/finality/mod.rs index 59ea2ad582..a2735cbcfc 100644 --- a/fendermint/vm/topdown/src/finality/mod.rs +++ b/fendermint/vm/topdown/src/finality/mod.rs @@ -31,14 +31,6 @@ fn ensure_sequential u64>(msgs: &[T], f: F) -> StmResult<(), Err Ok(()) } -pub(crate) fn validator_changes(p: &ParentViewPayload) -> Vec { - p.1.clone() -} - -pub(crate) fn topdown_cross_msgs(p: &ParentViewPayload) -> Vec { - p.2.clone() -} - #[cfg(test)] mod tests { use crate::proxy::ParentQueryProxy; diff --git a/fendermint/vm/topdown/src/finality/null.rs b/fendermint/vm/topdown/src/finality/null.rs index 89270e99dd..d40f6bbc72 100644 --- a/fendermint/vm/topdown/src/finality/null.rs +++ b/fendermint/vm/topdown/src/finality/null.rs @@ -1,11 
+1,9 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use crate::finality::{ - ensure_sequential, topdown_cross_msgs, validator_changes, ParentViewPayload, -}; +use crate::finality::{ensure_sequential, ParentViewPayload}; use crate::{BlockHash, BlockHeight, Config, Error, IPCParentFinality, SequentialKeyCache}; -use async_stm::{abort, atomically, Stm, StmResult, TVar}; +use async_stm::{abort, Stm, StmResult, TVar}; use ipc_api::cross::IpcEnvelope; use ipc_api::staking::PowerChangeRequest; use std::cmp::min; @@ -43,22 +41,6 @@ impl FinalityWithNull { Ok(self.genesis_epoch) } - pub async fn validator_changes( - &self, - height: BlockHeight, - ) -> anyhow::Result>> { - let r = atomically(|| self.handle_null_block(height, validator_changes, Vec::new)).await; - Ok(r) - } - - pub async fn top_down_msgs( - &self, - height: BlockHeight, - ) -> anyhow::Result>> { - let r = atomically(|| self.handle_null_block(height, topdown_cross_msgs, Vec::new)).await; - Ok(r) - } - pub fn last_committed_finality(&self) -> Stm> { self.last_committed_finality.read_clone() } @@ -230,23 +212,6 @@ impl FinalityWithNull { Ok(None) } - fn handle_null_block T, D: Fn() -> T>( - &self, - height: BlockHeight, - f: F, - d: D, - ) -> Stm> { - let cache = self.cached_data.read()?; - Ok(cache.get_value(height).map(|v| { - if let Some(i) = v.as_ref() { - f(i) - } else { - tracing::debug!(height, "a null round detected, return default"); - d() - } - })) - } - fn get_at_height T>( &self, height: BlockHeight, diff --git a/fendermint/vm/topdown/src/proxy.rs b/fendermint/vm/topdown/src/proxy.rs index fa4f901e91..d41ca069bb 100644 --- a/fendermint/vm/topdown/src/proxy.rs +++ b/fendermint/vm/topdown/src/proxy.rs @@ -40,6 +40,38 @@ pub trait ParentQueryProxy { &self, height: BlockHeight, ) -> anyhow::Result>>; + + /// Get top down messages in an inclusive range. 
+ async fn get_top_down_msgs_range( + &self, + from: BlockHeight, + to: BlockHeight, + ) -> anyhow::Result> { + if from > to { + return Ok(Vec::new()); + } + let mut out = Vec::new(); + for height in from..=to { + out.extend(self.get_top_down_msgs(height).await?.value); + } + Ok(out) + } + + /// Get validator changes in an inclusive range. + async fn get_validator_changes_range( + &self, + from: BlockHeight, + to: BlockHeight, + ) -> anyhow::Result> { + if from > to { + return Ok(Vec::new()); + } + let mut out = Vec::new(); + for height in from..=to { + out.extend(self.get_validator_changes(height).await?.value); + } + Ok(out) + } } /// The proxy to the subnet's parent @@ -116,6 +148,32 @@ impl ParentQueryProxy for IPCProviderProxy { v }) } + + async fn get_top_down_msgs_range( + &self, + from: BlockHeight, + to: BlockHeight, + ) -> anyhow::Result> { + let mut msgs = self + .ipc_provider + .get_top_down_msgs_range(&self.child_subnet, from as ChainEpoch, to as ChainEpoch) + .await?; + msgs.sort_by(|a, b| a.local_nonce.cmp(&b.local_nonce)); + Ok(msgs) + } + + async fn get_validator_changes_range( + &self, + from: BlockHeight, + to: BlockHeight, + ) -> anyhow::Result> { + let mut changes = self + .ipc_provider + .get_validator_changeset_range(&self.child_subnet, from as ChainEpoch, to as ChainEpoch) + .await?; + changes.sort_by(|a, b| a.configuration_number.cmp(&b.configuration_number)); + Ok(changes) + } } // TODO - create a macro for this @@ -186,6 +244,34 @@ impl ParentQueryProxy for IPCProviderProxyWithLatency { ) .await } + + #[instrument(skip(self))] + async fn get_top_down_msgs_range( + &self, + from: BlockHeight, + to: BlockHeight, + ) -> anyhow::Result> { + emit_event_with_latency( + &self.inner.parent_subnet.to_string(), + "get_top_down_msgs_range", + || async { self.inner.get_top_down_msgs_range(from, to).await }, + ) + .await + } + + #[instrument(skip(self))] + async fn get_validator_changes_range( + &self, + from: BlockHeight, + to: BlockHeight, + ) -> 
anyhow::Result> { + emit_event_with_latency( + &self.inner.parent_subnet.to_string(), + "get_validator_changeset_range", + || async { self.inner.get_validator_changes_range(from, to).await }, + ) + .await + } } // TODO Karel - make it nicer. Perhaps use a macro? diff --git a/fendermint/vm/topdown/src/sync/syncer.rs b/fendermint/vm/topdown/src/sync/syncer.rs index b74098742f..218f20b04a 100644 --- a/fendermint/vm/topdown/src/sync/syncer.rs +++ b/fendermint/vm/topdown/src/sync/syncer.rs @@ -95,20 +95,36 @@ where break; } - first_non_null_parent_hash = match self - .poll_next(latest_height_fetched + 1, first_non_null_parent_hash) + let next_height = latest_height_fetched + 1; + match self + .poll_next(next_height, first_non_null_parent_hash.clone()) .await { - Ok(h) => h, + Ok(h) => { + first_non_null_parent_hash = h; + latest_height_fetched += 1; + } Err(Error::ParentChainReorgDetected) => { tracing::warn!("potential reorg detected, clear cache and retry"); self.reset().await?; break; } + // A concurrent commit can move the tracked height forward while this poll is in-flight. + // In that case, this height is stale and should be ignored rather than surfaced as error. + Err(e @ Error::NotSequential) + | Err(e @ Error::NonSequentialParentViewInsert(_)) => { + if self.has_advanced_past(next_height).await { + tracing::debug!( + height = next_height, + error = e.to_string(), + "ignoring stale polled height after local state advanced" + ); + break; + } + return Err(anyhow!(e)); + } Err(e) => return Err(anyhow!(e)), - }; - - latest_height_fetched += 1; + } if latest_height_fetched == chain_head { tracing::debug!("reached the tip of the chain"); @@ -132,6 +148,21 @@ where atomically(|| self.provider.cached_blocks()).await > max_cache_blocks } + /// Returns true if either the provider cache or voting tally already moved to `height` or beyond. 
+ async fn has_advanced_past(&self, height: BlockHeight) -> bool { + let (provider_height, tally_height) = atomically(|| { + let provider_height = self.provider.latest_height()?; + let tally_height = self.vote_tally.latest_height()?; + Ok((provider_height, tally_height)) + }) + .await; + + provider_height + .map(|cached_height| cached_height >= height) + .unwrap_or(false) + || tally_height >= height + } + /// Get the latest data stored in the cache to pull the next block async fn latest_cached_data(&self) -> (BlockHeight, BlockHash) { // we are getting the latest height fetched in cache along with the first non null block diff --git a/fendermint/vm/topdown/src/toggle.rs b/fendermint/vm/topdown/src/toggle.rs index a300eb73ef..b211ab5ae4 100644 --- a/fendermint/vm/topdown/src/toggle.rs +++ b/fendermint/vm/topdown/src/toggle.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0, MIT use crate::finality::ParentViewPayload; +use crate::proxy::ParentQueryProxy; use crate::{ BlockHash, BlockHeight, CachedFinalityProvider, Error, IPCParentFinality, ParentFinalityProvider, ParentViewProvider, @@ -88,6 +89,16 @@ impl ParentFinalityProvider f } impl

Toggle> { + pub async fn latest_finalized_parent_view(&self) -> anyhow::Result> + where + P: ParentQueryProxy + Send + Sync + 'static, + { + match self.inner.as_ref() { + Some(p) => p.latest_finalized_parent_view().await, + None => Ok(None), + } + } + pub fn block_hash(&self, height: BlockHeight) -> Stm> { self.perform_or_else(|p| p.block_hash(height), None) } diff --git a/init-node.yaml b/init-node.yaml deleted file mode 100644 index 91bf93f956..0000000000 --- a/init-node.yaml +++ /dev/null @@ -1,24 +0,0 @@ -fendermint-overrides: | - [ipc] - subnet_id = "/r314159/t410..." - - [ipc.topdown] - chain_head_delay = 10 - proposal_delay = 2 - max_proposal_range = 100 - polling_interval = 10 - exponential_back_off = 5 - exponential_retry_limit = 5 - parent_http_endpoint = "https://api.calibration.node.glif.io/rpc/v1" - parent_registry = "0x..." - parent_gateway = "0x..." - - [ipc.topdown.f3] - [ipc.topdown.f3.proof_service] - enabled = true - polling_interval = "30s" - parent_rpc_url = "https://api.calibration.node.glif.io/rpc/v1" - gateway_id = "0x..." 
# parent gateway ETH address - [ipc.topdown.f3.proof_service.cache_config] - lookahead_instances = 5 - retention_epochs = 100 diff --git a/init-sub.yaml b/init-sub.yaml index 01badb90cd..65f5168876 100644 --- a/init-sub.yaml +++ b/init-sub.yaml @@ -4,26 +4,28 @@ # deploy: # url: "https://api.calibration.node.glif.io/rpc/v1" -# from: "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266" +# from: "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" # chain-id: 314159 +node-topdown-mode: legacy + create: parent: /r314159 chain-id: 314159 - from: 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 + from: 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 min-validator-stake: 1.0 min-validators: 1 bottomup-check-period: 50 permission-mode: federated supply-source-kind: native min-cross-msg-fee: 0.1 - genesis-subnet-ipc-contracts-owner: 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 + genesis-subnet-ipc-contracts-owner: 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 parent-filecoin-rpc: "https://api.calibration.node.glif.io/rpc/v1" activate: mode: federated validator-pubkeys: - - 0x048318535b54105d4a7aae60c08fc45f9687181b4fdfc625bd1a753fa7397fed753547f11ca8696646f2f3acb08e31016afac23e630c5d11f59f61fef57b0d2aa5 + - 0x04d64f81dcff5f31e67263e3d2df35b0a412bb11935ae6b897018a59bf5d6f9e99ad02cbedd2bfcfbd2a56f02f6812c9f978536496e972fe2fbbf96e8743fb28dc validator-power: - 1 diff --git a/ipc-storage/DESIGN.md b/ipc-storage/DESIGN.md index 9eaff6052b..87d49688fa 100644 --- a/ipc-storage/DESIGN.md +++ b/ipc-storage/DESIGN.md @@ -75,11 +75,13 @@ All data is encrypted on the client before chunking and distribution. 
Storage no ### Encryption Scheme -| Component | Algorithm | Description | -|-----------|-----------|-------------| -| **Symmetric encryption** | AES-256-GCM | Encrypts the actual data | -| **Key derivation** | HKDF-SHA256 | Derives encryption key from master secret | -| **Key encryption** | ECIES / RSA-OAEP | Encrypts DEK for storage/sharing | + +| Component | Algorithm | Description | +| ------------------------ | ---------------- | ----------------------------------------- | +| **Symmetric encryption** | AES-256-GCM | Encrypts the actual data | +| **Key derivation** | HKDF-SHA256 | Derives encryption key from master secret | +| **Key encryption** | ECIES / RSA-OAEP | Encrypts DEK for storage/sharing | + ### Encryption Flow @@ -177,11 +179,13 @@ Data is organized in a three-level hierarchy. All operations occur on **encrypte ### Terminology -| Term | Description | Typical Size | -|------|-------------|--------------| -| **Chunk** | A segment of encrypted data before/after erasure encoding | 1-64 MB | -| **Piece** | A subdivision of an encoded chunk | 256 KB - 1 MB | -| **Leaf** | The smallest unit for Merkle tree construction | 256 bytes - 1 KB | + +| Term | Description | Typical Size | +| --------- | --------------------------------------------------------- | ---------------- | +| **Chunk** | A segment of encrypted data before/after erasure encoding | 1-64 MB | +| **Piece** | A subdivision of an encoded chunk | 256 KB - 1 MB | +| **Leaf** | The smallest unit for Merkle tree construction | 256 bytes - 1 KB | + --- @@ -192,28 +196,33 @@ IPC Storage uses Reed-Solomon erasure coding, similar to [Storj](https://storj.i ### How Reed-Solomon Works Reed-Solomon encoding transforms `k` data shards into `k + m` total shards (where `m` = parity), such that: + - Any `k` of the `k + m` shards are sufficient to reconstruct the original data - Up to `m` shards can be lost without data loss ### Encoding Parameters -| Parameter | Symbol | Description | Example | 
-|-----------|--------|-------------|---------| -| Data shards | k | Number of original data shards per chunk | 15 | -| Parity shards | m | Number of redundancy shards per chunk | 8 | -| Total shards | n | k + m = total shards per chunk | 23 | -| Max chunk size | — | Maximum bytes per chunk before RS encoding | 16 MiB | -| Expansion factor | n/k | Storage overhead ratio | 1.53x | + +| Parameter | Symbol | Description | Example | +| ---------------- | ------ | ------------------------------------------ | ------- | +| Data shards | k | Number of original data shards per chunk | 15 | +| Parity shards | m | Number of redundancy shards per chunk | 8 | +| Total shards | n | k + m = total shards per chunk | 23 | +| Max chunk size | — | Maximum bytes per chunk before RS encoding | 16 MiB | +| Expansion factor | n/k | Storage overhead ratio | 1.53x | + ### Architecture The crate is built around three core traits: -| Trait | Purpose | -|-------|---------| -| `Encoder` | Splits raw bytes into `k` padded shards and computes `m` parity shards | -| `Decoder` | Recovers original data from any `k` of `k + m` shards | -| `NodeAssigner` | Maps each shard to a storage node | + +| Trait | Purpose | +| -------------- | ---------------------------------------------------------------------- | +| `Encoder` | Splits raw bytes into `k` padded shards and computes `m` parity shards | +| `Decoder` | Recovers original data from any `k` of `k + m` shards | +| `NodeAssigner` | Maps each shard to a storage node | + All trait methods are stateless (no `&self`) with an associated `Shard` type. Repair is simply decode followed by encode — no separate `Repairer` trait is needed. 
@@ -387,11 +396,13 @@ All Chunk Merkle Roots form the leaves of the File Merkle Tree: ### Summary -| Level | Input | Output | Count | -|-------|-------|--------|-------| -| 1 | Leaf data (encrypted bytes) | Piece Merkle Root (PMR) | leaves_per_piece leaves → 1 PMR | -| 2 | PMRs for one chunk | Chunk Merkle Root (CMR) | pieces_per_chunk PMRs → 1 CMR | -| 3 | CMRs for all chunks | File Merkle Root (FMR) | n CMRs → 1 FMR | + +| Level | Input | Output | Count | +| ----- | --------------------------- | ----------------------- | ------------------------------- | +| 1 | Leaf data (encrypted bytes) | Piece Merkle Root (PMR) | leaves_per_piece leaves → 1 PMR | +| 2 | PMRs for one chunk | Chunk Merkle Root (CMR) | pieces_per_chunk PMRs → 1 CMR | +| 3 | CMRs for all chunks | File Merkle Root (FMR) | n CMRs → 1 FMR | + --- @@ -415,6 +426,7 @@ struct StorageCommitment { ``` Everything else is derivable from these fields: + - `num_chunks = originalLen.div_ceil(MAX_CHUNK_SIZE)` - `chunk_data_len(i) = min(MAX_CHUNK_SIZE, originalLen - i * MAX_CHUNK_SIZE)` - Shard→node mapping: deterministic rotation over the epoch's node list @@ -546,11 +558,13 @@ struct ChallengeProof { ### Challenge Timing -| Parameter | Description | Typical Value | -|-----------|-------------|---------------| -| Challenge interval | Time between challenges per commitment | 1 hour | -| Response deadline | Time allowed for proof submission | 10 minutes | -| Consecutive failures | Failures before slashing | 3 | + +| Parameter | Description | Typical Value | +| -------------------- | -------------------------------------- | ------------- | +| Challenge interval | Time between challenges per commitment | 1 hour | +| Response deadline | Time allowed for proof submission | 10 minutes | +| Consecutive failures | Failures before slashing | 3 | + **Important**: Response deadline must be shorter than the time required to reconstruct a chunk from other nodes (to prevent lazy node attacks). 
@@ -624,13 +638,15 @@ function computeMerkleRoot( ### Proof Size Analysis -| Level | Proof Elements | Size per Element | Typical Total | -|-------|---------------|------------------|---------------| -| Leaf → PMR | logā‚‚(leaves_per_piece) | 32 bytes | ~320 bytes (10 levels) | -| PMR → CMR | logā‚‚(pieces_per_chunk) | 32 bytes | ~256 bytes (8 levels) | -| CMR → FMR | logā‚‚(num_chunks) | 32 bytes | ~224 bytes (7 levels) | -| Leaf data | 1 | leaf_size | ~256 bytes | -| **Total** | | | **~1 KB** | + +| Level | Proof Elements | Size per Element | Typical Total | +| ---------- | ---------------------- | ---------------- | ---------------------- | +| Leaf → PMR | logā‚‚(leaves_per_piece) | 32 bytes | ~320 bytes (10 levels) | +| PMR → CMR | logā‚‚(pieces_per_chunk) | 32 bytes | ~256 bytes (8 levels) | +| CMR → FMR | logā‚‚(num_chunks) | 32 bytes | ~224 bytes (7 levels) | +| Leaf data | 1 | leaf_size | ~256 bytes | +| **Total** | | | **~1 KB** | + --- @@ -638,30 +654,35 @@ function computeMerkleRoot( ### Attack Vectors and Mitigations -| Attack | Description | Mitigation | -|--------|-------------|------------| -| **Data withholding** | Node claims to store data but doesn't | Random challenges require actual data | -| **Lazy node** | Node reconstructs data on-demand from peers instead of storing | Response deadline < reconstruction time | -| **Proof precomputation** | Precompute all possible proofs | Large leaf count makes this infeasible | -| **Collusion** | Nodes share data only for challenges | Unpredictable VRF-based challenge timing | -| **Sybil attack** | Single entity runs multiple nodes | Stake requirements, reputation system | -| **Grinding** | Manipulate random challenge selection | Verifiable Random Functions (VRF) | -| **Data exposure** | Storage nodes read user data | Client-side encryption (AES-256-GCM) | + +| Attack | Description | Mitigation | +| ------------------------ | -------------------------------------------------------------- | 
---------------------------------------- | +| **Data withholding** | Node claims to store data but doesn't | Random challenges require actual data | +| **Lazy node** | Node reconstructs data on-demand from peers instead of storing | Response deadline < reconstruction time | +| **Proof precomputation** | Precompute all possible proofs | Large leaf count makes this infeasible | +| **Collusion** | Nodes share data only for challenges | Unpredictable VRF-based challenge timing | +| **Sybil attack** | Single entity runs multiple nodes | Stake requirements, reputation system | +| **Grinding** | Manipulate random challenge selection | Verifiable Random Functions (VRF) | +| **Data exposure** | Storage nodes read user data | Client-side encryption (AES-256-GCM) | + ### Lazy Node Attack - Detailed Mitigation A malicious node could attempt to: + 1. Not store its assigned chunk 2. When challenged, download `k` chunks from other nodes 3. Reconstruct its chunk using Reed-Solomon decoding 4. Respond to challenge with reconstructed data **Mitigation**: Set response deadline such that: + ``` deadline < time_to_download_k_chunks + time_to_decode ``` For a 16 MB chunk with k=64: + - Download 64 Ɨ 16 MB = 1 GB from network - At 100 Mbps: ~80 seconds download time - Decoding: ~5-10 seconds @@ -695,12 +716,14 @@ Challenge randomness must be unpredictable and unbiasable: ### Cryptographic Assumptions -| Component | Algorithm | Security Level | -|-----------|-----------|----------------| -| Data encryption | AES-256-GCM | 256-bit | -| Hash function | Keccak-256 | 128-bit collision resistance | -| Randomness | VRF (e.g., ECVRF) | Unpredictable, verifiable | -| Erasure coding | Reed-Solomon GF(2^8) | Information-theoretic | + +| Component | Algorithm | Security Level | +| --------------- | -------------------- | ---------------------------- | +| Data encryption | AES-256-GCM | 256-bit | +| Hash function | Keccak-256 | 128-bit collision resistance | +| Randomness | VRF (e.g., ECVRF) | 
Unpredictable, verifiable | +| Erasure coding | Reed-Solomon GF(2^8) | Information-theoretic | + ### Slashing Conditions @@ -741,11 +764,13 @@ Users pay upfront to store data on the network. The cost is determined by the si write_cost = price_per_mb Ɨ file_size_in_mb Ɨ duration ``` -| Parameter | Description | -|-----------|-------------| -| `price_per_mb` | Network-determined price per megabyte per unit time | -| `file_size_in_mb` | Total size of the stored file in megabytes | -| `duration` | Storage duration (e.g., in epochs or blocks) | + +| Parameter | Description | +| ----------------- | --------------------------------------------------- | +| `price_per_mb` | Network-determined price per megabyte per unit time | +| `file_size_in_mb` | Total size of the stored file in megabytes | +| `duration` | Storage duration (e.g., in epochs or blocks) | + The user locks this payment into the storage contract when submitting their on-chain storage commitment. @@ -757,11 +782,13 @@ Node operators earn rewards by proving they continue to store their assigned dat node_reward = data_size_stored_in_mb Ɨ price_per_mb Ɨ duration_since_last_claim ``` -| Parameter | Description | -|-----------|-------------| -| `data_size_stored_in_mb` | Size of data this node is responsible for (its encoded chunk) | -| `price_per_mb` | Same per-MB rate from the storage commitment | -| `duration_since_last_claim` | Time elapsed since the node's last successful claim | + +| Parameter | Description | +| --------------------------- | ------------------------------------------------------------- | +| `data_size_stored_in_mb` | Size of data this node is responsible for (its encoded chunk) | +| `price_per_mb` | Same per-MB rate from the storage commitment | +| `duration_since_last_claim` | Time elapsed since the node's last successful claim | + ``` 
ā”Œā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā” @@ -794,10 +821,12 @@ Reading data uses an off-chain **payment ticket** model. Anyone who wants to ret read_cost = read_price_per_mb Ɨ file_size_in_mb ``` -| Parameter | Description | -|-----------|-------------| + +| Parameter | Description | +| ------------------- | --------------------------------------------------------- | | `read_price_per_mb` | Network or market-determined price per megabyte for reads | -| `file_size_in_mb` | Size of the data being read in megabytes | +| `file_size_in_mb` | Size of the data being read in megabytes | + #### Payment Ticket Flow @@ -838,10 +867,12 @@ Batch redemption lets node operators amortize on-chain transaction costs by subm ### Summary -| Payment Type | Payer | Recipient | Pricing | Settlement | -|-------------|-------|-----------|---------|------------| -| **Write** | Data owner | Storage nodes | `price_per_mb Ɨ size Ɨ duration` | On-chain claims (challenge-gated) | -| **Read** | Data reader | Serving node | `read_price_per_mb Ɨ size` | Off-chain tickets, redeemed on-chain | + +| Payment Type | Payer | Recipient | Pricing | Settlement | +| ------------ | ----------- | ------------- | -------------------------------- | ------------------------------------ | +| **Write** | Data owner | Storage nodes | `price_per_mb Ɨ size Ɨ duration` | On-chain claims (challenge-gated) | +| **Read** | Data reader | Serving node | `read_price_per_mb Ɨ size` | Off-chain tickets, redeemed on-chain | + --- @@ -1087,12 +1118,14 @@ If deadline exceeded: Guardians are a **trusted, permissioned set** registered at the protocol level: -| Requirement | Description | -|-------------|-------------| -| **Staked Collateral** | Guardians must stake tokens to participate | -| **Registration** | Protocol governance approves 
guardian additions | -| **Slashing** | Guardians can be slashed for malicious behavior | -| **Reputation** | Track record of successful repairs visible on-chain | + +| Requirement | Description | +| --------------------- | --------------------------------------------------- | +| **Staked Collateral** | Guardians must stake tokens to participate | +| **Registration** | Protocol governance approves guardian additions | +| **Slashing** | Guardians can be slashed for malicious behavior | +| **Reputation** | Track record of successful repairs visible on-chain | + This trusted model avoids DDoS and sybil attack vectors on the repair mechanism. @@ -1113,21 +1146,17 @@ This is the client's responsibility. The protocol does not provide automatic fal ## References 1. **Storj Whitepaper**: [https://storj.io/storj.pdf](https://storj.io/storj.pdf) - - Reed-Solomon erasure coding parameters - - Distributed storage architecture - + - Reed-Solomon erasure coding parameters + - Distributed storage architecture 2. **Filecoin Spec - Proof of Data Possession**: [https://spec.filecoin.io/](https://spec.filecoin.io/) - - PDP challenge-response protocol - - Merkle tree construction for storage proofs - + - PDP challenge-response protocol + - Merkle tree construction for storage proofs 3. **Reed-Solomon Error Correction**: [https://en.wikipedia.org/wiki/Reed–Solomon_error_correction](https://en.wikipedia.org/wiki/Reed–Solomon_error_correction) - - Mathematical foundations of erasure coding - + - Mathematical foundations of erasure coding 4. **Merkle Trees**: [https://en.wikipedia.org/wiki/Merkle_tree](https://en.wikipedia.org/wiki/Merkle_tree) - - Hash tree structure and verification - + - Hash tree structure and verification 5. 
**ECVRF (Verifiable Random Functions)**: [https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf) - - VRF specification for unpredictable randomness + - VRF specification for unpredictable randomness --- @@ -1165,3 +1194,4 @@ Shard→node mapping: Deterministic (no storage cost) Challenge response deadline: 30 seconds (Reconstruction time at 100 Mbps: ~90 seconds) ``` + diff --git a/ipc-storage/README.md b/ipc-storage/README.md index 5d76012edd..d54057e857 100644 --- a/ipc-storage/README.md +++ b/ipc-storage/README.md @@ -1,5 +1,99 @@ # Bucket Storage Guide (Path-Based Access) +## Quickstart (Subnet + Storage) + +This section is the shortest end-to-end setup for local testing with `ipc-cli`. + +### 0) Build the right binaries + +`ipc-storage` actors are only present when `ipc-storage` feature is enabled. + +```bash +# IPC CLI + node stack (must include ipc-storage feature) +cargo build --release -p ipc-cli --features ipc-storage + +# Storage node/gateway binaries +cargo build --release -p ipc-decentralized-storage --bin node --bin gateway +``` + +### 1) Create subnet from YAML + +Use your subnet config (for example `init-sub.yaml`): + +Before running `subnet init`, make sure YAML placeholders are replaced: +- `import-wallets[].private-key` (or `path`) for the account you control +- `create.from` and `create.genesis-subnet-ipc-contracts-owner` +- `activate.validator-pubkeys` with your validator public key(s) + +If the generated `~/.ipc/node_.yaml` contains a validator key placeholder, fill `validator.private-key` before `node init`. +The validator private key in `node_.yaml` must correspond to one of the pubkeys in `activate.validator-pubkeys`; otherwise `node start` runs but that validator identity is not the one activated on-chain. 
+ +Quick check: + +```bash +# Derive uncompressed pubkey from the validator private key you put in node_.yaml +cast wallet public-key --private-key + +# Ensure this exactly matches an entry in activate.validator-pubkeys from subnet-init YAML. +``` + +```bash +./target/release/ipc-cli subnet init --config /absolute/path/to/init-sub.yaml +``` + +This generates `node_.yaml` in `~/.ipc/`. + +### 2) Initialize and start the validator node + +```bash +# Use the generated node config path from previous step: +./target/release/ipc-cli node init --config ~/.ipc/node_.yaml + +# Start your local subnet node +./target/release/ipc-cli node start --home ~/.node-ipc +``` + +### 3) Initialize storage config (creates dedicated operator key) + +```bash +./target/release/ipc-cli storage init \ + --node-config ~/.ipc/node_.yaml +``` + +This prints: +- `operator secret key file` (defaults to `~/.node-ipc/storage/operator.sk`) +- delegated operator address `t410...` (**fund this one**) +- native address `t1...` (diagnostic only) + +### 4) Fund the delegated operator address + +Use the `t410...` printed by `storage init`: + +```bash +./target/release/ipc-cli cross-msg fund \ + --subnet "" \ + --from \ + --to \ + 1 +``` + +### 5) Register operator and run storage + +```bash +./target/release/ipc-cli storage run \ + --config ~/.ipc/storage_.yaml \ + --register-operator +``` + +Expected healthy logs: +- `Successfully registered as node operator` +- node/gateway started +- gateway uses delegated sender (`t410...`) + +### 6) Important note about object uploads + +`POST /v1/objects` uploads bytes to Iroh, but on-chain object registration is a separate step (for example `addObject(...)` via `cast`/dropbox). If registration is skipped, gateway can keep showing `Found 0 added blobs`. 
+ ## Configuration ```bash @@ -51,6 +145,11 @@ export SECRET_KEY_FILE=./test-network/keys/alice.sk ``` +Notes: +- `register-operator` now supports delegated (`t410...`) operator keys through the blobs actor `InvokeContract` facade path. +- Operator query methods are also available through the same facade path: `getOperatorInfo(address)` and `getActiveOperators()`. +- Storage node and gateway use delegated (`t410...`) sender path for on-chain interactions; fund delegated operator address before running. + ## 3. Launch ipc-dropbox Launch `ipc-dropbox` in `ipc-storage/ipc-dropbox` with `npm run dev`. diff --git a/ipc-storage/ipc-decentralized-storage/Cargo.toml b/ipc-storage/ipc-decentralized-storage/Cargo.toml index 07e37a5016..f2cac62b88 100644 --- a/ipc-storage/ipc-decentralized-storage/Cargo.toml +++ b/ipc-storage/ipc-decentralized-storage/Cargo.toml @@ -29,7 +29,8 @@ urlencoding.workspace = true blake3.workspace = true # HTTP client dependencies -reqwest = { version = "0.11", features = ["json"] } +reqwest = { version = "0.11", features = ["json", "multipart"] } +tempfile = "3.8" # CLI dependencies clap = { workspace = true, features = ["derive"] } diff --git a/ipc-storage/ipc-decentralized-storage/src/bin/gateway.rs b/ipc-storage/ipc-decentralized-storage/src/bin/gateway.rs index 7b33dc992b..9df4ab255f 100644 --- a/ipc-storage/ipc-decentralized-storage/src/bin/gateway.rs +++ b/ipc-storage/ipc-decentralized-storage/src/bin/gateway.rs @@ -3,12 +3,13 @@ //! 
CLI for running the blob gateway with objects API

-use anyhow::{anyhow, Context, Result};
+use anyhow::{Context, Result};
 use bls_signatures::{PrivateKey as BlsPrivateKey, Serialize as BlsSerialize};
 use clap::Parser;
 use fendermint_rpc::message::SignedMessageFactory;
 use fendermint_rpc::FendermintClient;
 use fendermint_rpc::QueryClient;
+use fendermint_vm_actor_interface::eam::EthAddress as FvmEthAddress;
 use fendermint_vm_message::query::FvmQueryHeight;
 use fvm_shared::address::{set_current_network, Address, Network};
 use fvm_shared::chainid::ChainID;
@@ -79,16 +80,16 @@ struct Args {
     iroh_v6_addr: Option,
 }

-/// Get the next sequence number (nonce) of an account.
-async fn get_sequence(client: &impl QueryClient, addr: &Address) -> Result<u64> {
+/// Get the next sequence number (nonce) of an account if it exists.
+async fn get_sequence_opt(client: &impl QueryClient, addr: &Address) -> Result<Option<u64>> {
     let state = client
         .actor_state(addr, FvmQueryHeight::default())
         .await
         .context("failed to get actor state")?;

     match state.value {
-        Some((_id, state)) => Ok(state.sequence),
-        None => Err(anyhow!("cannot find actor {addr}")),
+        Some((_id, state)) => Ok(Some(state.sequence)),
+        None => Ok(None),
     }
 }

@@ -100,6 +101,11 @@ async fn main() -> Result<()> {
         .with(tracing_subscriber::fmt::layer())
         .init();

+    tracing::info!(
+        "gateway v{} (build 2026-04-01-fevm-facade)",
+        env!("CARGO_PKG_VERSION"),
+    );
+
     let args = Args::parse();

     // Set the network for address display (f for mainnet, t for testnet)
@@ -125,10 +131,13 @@ async fn main() -> Result<()> {
         .context("failed to read secret key")?;
     let pk = sk.public_key();

-    // Use f1 address (secp256k1) for signing native FVM actor transactions
-    let from_addr =
-        Address::new_secp256k1(&pk.serialize()).context("failed to create f1 address")?;
-    tracing::info!("Gateway sender address: {}", from_addr);
+    let from_f1 = Address::new_secp256k1(&pk.serialize()).context("failed to create f1 address")?;
+    let from_eth =
FvmEthAddress::new_secp256k1(&pk.serialize()) + .context("failed to derive delegated address from secret key")?; + let from_f410 = + Address::new_delegated(10, &from_eth.0).context("failed to create f410 address")?; + tracing::info!("Gateway sender f1 address: {}", from_f1); + tracing::info!("Gateway sender f410 address: {}", from_f410); // Parse or generate BLS private key if provided let _bls_private_key = if let Some(key_file) = &args.bls_key_file { @@ -214,11 +223,6 @@ async fn main() -> Result<()> { let client = FendermintClient::new_http(args.rpc_url, None) .context("failed to create Fendermint client")?; - // Query the account nonce from the state - let sequence = get_sequence(&client, &from_addr) - .await - .context("failed to get account sequence")?; - // Query the chain ID let chain_id = client .state_params(FvmQueryHeight::default()) @@ -228,6 +232,32 @@ async fn main() -> Result<()> { .chain_id; tracing::info!("Chain ID: {}", chain_id); + + // Prefer delegated f410 sender: FinalizeBlob now goes through the EVM facade + // (InvokeContract with ABI-encoded calldata) so it works with both f1 and f410. + let (from_addr, sequence) = if let Some(sequence) = get_sequence_opt(&client, &from_f410) + .await + .context("failed to get delegated account sequence")? + { + tracing::info!("Using delegated sender (f410) for gateway transactions"); + (from_f410, sequence) + } else if let Some(sequence) = get_sequence_opt(&client, &from_f1) + .await + .context("failed to get native account sequence")? 
+ { + tracing::info!( + "Delegated f410 sender {} is not on-chain; falling back to native f1 sender", + from_f410 + ); + (from_f1, sequence) + } else { + anyhow::bail!( + "neither delegated sender {} nor native sender {} found on-chain; \ + fund the delegated (f410) address for gateway transaction signing", + from_f410, from_f1 + ); + }; + tracing::info!("Gateway sender address: {}", from_addr); tracing::info!("Account sequence: {}", sequence); // Create signed message factory diff --git a/ipc-storage/ipc-decentralized-storage/src/bin/node.rs b/ipc-storage/ipc-decentralized-storage/src/bin/node.rs index d70f6ebd5a..8ee3fa2e50 100644 --- a/ipc-storage/ipc-decentralized-storage/src/bin/node.rs +++ b/ipc-storage/ipc-decentralized-storage/src/bin/node.rs @@ -3,30 +3,43 @@ //! Binary for running a decentralized storage node -use anyhow::{anyhow, Context, Result}; +use anyhow::{Context, Result}; use bls_signatures::{PrivateKey as BlsPrivateKey, Serialize as BlsSerialize}; +use bytes::Bytes; use clap::{Parser, Subcommand}; -use ethers::types::Address as EthAddress; +use ethers::abi::{encode as abi_encode, Token as AbiToken}; +use ethers::types::{Address as EthAddress, U256 as EthU256}; +use fendermint_actor_blobs_shared::execution::{ + ExecutionJob, GetJobParams, JobStatus, ListJobsParams, ListJobsReturn, CLAIM_JOB_SELECTOR, + COMPLETE_JOB_SELECTOR, FAIL_JOB_SELECTOR, +}; use fendermint_actor_blobs_shared::method::Method; use fendermint_actor_blobs_shared::operators::RegisterNodeOperatorParams; use fendermint_actor_blobs_shared::BLOBS_ACTOR_ADDR; use fendermint_rpc::message::{GasParams, SignedMessageFactory}; -use fendermint_rpc::tx::{TxClient, TxCommit}; +use fendermint_rpc::tx::{BoundClient, TxClient, TxCommit}; use fendermint_rpc::FendermintClient; use fendermint_rpc::QueryClient; +use fendermint_vm_actor_interface::eam::EthAddress as FvmEthAddress; +use fendermint_vm_actor_interface::system; use fendermint_vm_message::query::FvmQueryHeight; use 
fvm_ipld_encoding::RawBytes; use fvm_shared::address::{set_current_network, Address, Network}; +use fvm_shared::bigint::Zero; use fvm_shared::chainid::ChainID; use fvm_shared::econ::TokenAmount; +use fvm_shared::message::Message; use ipc_decentralized_storage::node::{launch, NodeConfig}; use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; use std::path::PathBuf; use std::str::FromStr; use std::time::Duration; use tendermint_rpc::Url; +use tokio::process::Command as TokioCommand; use tracing::info; +const REGISTER_NODE_OPERATOR_SELECTOR: [u8; 4] = [0x71, 0x3b, 0x10, 0xcf]; + #[derive(Parser, Debug)] #[command(name = "ipc-storage-node")] #[command(about = "Decentralized storage node CLI", long_about = None)] @@ -51,6 +64,8 @@ enum Commands { QueryBlob(QueryBlobArgs), /// Query an object from a bucket by key QueryObject(QueryObjectArgs), + /// Run execution worker loop over blobs actor jobs + RunExecutor(RunExecutorArgs), } #[derive(Parser, Debug)] @@ -165,6 +180,25 @@ struct QueryObjectArgs { height: Option, } +#[derive(Parser, Debug)] +struct RunExecutorArgs { + /// Path to file containing the secp256k1 secret key in Base64 format + #[arg(long, env = "SECRET_KEY_FILE", required = true)] + secret_key_file: PathBuf, + + /// Tendermint RPC URL for the chain + #[arg(long, default_value = "http://localhost:26657")] + rpc_url: String, + + /// Polling interval in seconds + #[arg(long, default_value = "5")] + poll_interval_secs: u64, + + /// Gateway URL for downloading/uploading ipc:// storage objects + #[arg(long, env = "IPC_STORAGE_GATEWAY")] + gateway_url: Option, +} + #[tokio::main] async fn main() -> Result<()> { // Initialize tracing @@ -197,6 +231,7 @@ async fn main() -> Result<()> { Commands::GenerateBlsKey(args) => generate_bls_key(args), Commands::QueryBlob(args) => query_blob(args).await, Commands::QueryObject(args) => query_object(args).await, + Commands::RunExecutor(args) => run_executor(args).await, } } @@ -307,11 +342,13 @@ async fn 
register_operator(args: RegisterOperatorArgs) -> Result<()> { .context("failed to read secret key")?; let pk = sk.public_key(); - // Use f1 address (secp256k1) instead of f410 (delegated/ethereum) because we're calling - // a native FVM actor with CBOR params, not an EVM contract with calldata - let from_addr = - Address::new_secp256k1(&pk.serialize()).context("failed to create f1 address")?; - info!("Sender address: {}", from_addr); + let from_f1 = Address::new_secp256k1(&pk.serialize()).context("failed to create f1 address")?; + let from_eth = FvmEthAddress::new_secp256k1(&pk.serialize()) + .context("failed to derive delegated address from secret key")?; + let from_f410 = + Address::new_delegated(10, &from_eth.0).context("failed to create f410 address")?; + info!("Sender f1 address: {}", from_f1); + info!("Sender f410 address: {}", from_f410); // Parse chain RPC URL let chain_rpc_url = @@ -321,10 +358,17 @@ async fn register_operator(args: RegisterOperatorArgs) -> Result<()> { let client = FendermintClient::new_http(chain_rpc_url, None) .context("failed to create Fendermint client")?; - // Query the account nonce from the state - let sequence = get_sequence(&client, &from_addr) + // Ensure blobs actor exists on this subnet. + let blobs_actor_state = client + .actor_state(&BLOBS_ACTOR_ADDR, FvmQueryHeight::default()) .await - .context("failed to get account sequence")?; + .context("failed to query blobs actor state")?; + if blobs_actor_state.value.is_none() { + anyhow::bail!( + "blobs actor {} is not deployed on this subnet. 
Recreate/start the subnet with ipc-storage enabled (fendermint_app built with --features ipc-storage), then retry register-operator.", + BLOBS_ACTOR_ADDR + ); + } // Query the chain ID let chain_id = client @@ -335,13 +379,6 @@ async fn register_operator(args: RegisterOperatorArgs) -> Result<()> { .chain_id; info!("Chain ID: {}", chain_id); - info!("Account sequence: {}", sequence); - - // Create signed message factory - let mf = SignedMessageFactory::new(sk, from_addr, sequence, ChainID::from(chain_id)); - - // Bind the client with the message factory - let mut client = client.bind(mf); // Prepare registration parameters let params = RegisterNodeOperatorParams { @@ -349,46 +386,53 @@ async fn register_operator(args: RegisterOperatorArgs) -> Result<()> { rpc_url: args.operator_rpc_url.clone(), }; - let params_bytes = - RawBytes::serialize(params).context("failed to serialize RegisterNodeOperatorParams")?; - - // Gas params let gas_params = GasParams { gas_limit: 10_000_000, - gas_fee_cap: TokenAmount::from_atto(100), - gas_premium: TokenAmount::from_atto(100), + gas_fee_cap: TokenAmount::from_atto(1_000_000), + gas_premium: TokenAmount::from_atto(100_000), }; - info!("Sending RegisterNodeOperator transaction..."); - - // Send the transaction - let res = TxClient::::transaction( - &mut client, - BLOBS_ACTOR_ADDR, - Method::RegisterNodeOperator as u64, - params_bytes, - TokenAmount::from_atto(0), - gas_params, - ) - .await - .context("failed to send RegisterNodeOperator transaction")?; - - if res.response.check_tx.code.is_err() { - anyhow::bail!( - "RegisterNodeOperator check_tx failed: {}", - res.response.check_tx.log - ); - } - - if res.response.deliver_tx.code.is_err() { + let tx_hash = if let Some(sequence) = get_sequence_opt(&client, &from_f410) + .await + .context("failed to get delegated account sequence")? 
+ { + info!("Using delegated sender (f410) via InvokeContract facade"); + info!("Account sequence: {}", sequence); + let mf = SignedMessageFactory::new(sk, from_f410, sequence, ChainID::from(chain_id)); + let mut client = client.bind(mf); + let calldata = encode_register_node_operator_calldata(¶ms); + let res = TxClient::::fevm_invoke( + &mut client, + BLOBS_ACTOR_ADDR, + calldata, + TokenAmount::from_atto(0), + gas_params, + ) + .await + .context("failed to send delegated RegisterNodeOperator transaction")?; + if res.response.check_tx.code.is_err() { + anyhow::bail!( + "RegisterNodeOperator check_tx failed: {}", + res.response.check_tx.log + ); + } + if res.response.deliver_tx.code.is_err() { + anyhow::bail!( + "RegisterNodeOperator deliver_tx failed: code={:?}, log={}, info={}, gas_used={}", + res.response.deliver_tx.code, + res.response.deliver_tx.log, + res.response.deliver_tx.info, + res.response.deliver_tx.gas_used + ); + } + info!("Sent RegisterNodeOperator transaction with delegated path"); + res.response.hash.to_string() + } else { anyhow::bail!( - "RegisterNodeOperator deliver_tx failed: code={:?}, log={}, info={}, gas_used={}", - res.response.deliver_tx.code, - res.response.deliver_tx.log, - res.response.deliver_tx.info, - res.response.deliver_tx.gas_used + "delegated sender {} not found on-chain; cross-fund this delegated address and retry (native f1 {} is intentionally not used)", + from_f410, from_f1 ); - } + }; info!("āœ“ Successfully registered as node operator!"); info!( @@ -396,21 +440,78 @@ async fn register_operator(args: RegisterOperatorArgs) -> Result<()> { hex::encode(bls_private_key.public_key().as_bytes()) ); info!(" RPC URL: {}", args.operator_rpc_url); - info!(" Tx hash: {}", res.response.hash); + info!(" Tx hash: {}", tx_hash); Ok(()) } -/// Get the next sequence number (nonce) of an account. -async fn get_sequence(client: &impl QueryClient, addr: &Address) -> Result { +/// Get the next sequence number (nonce) of an account if it exists. 
+async fn get_sequence_opt(client: &impl QueryClient, addr: &Address) -> Result<Option<u64>> {
     let state = client
         .actor_state(addr, FvmQueryHeight::default())
         .await
         .context("failed to get actor state")?;

     match state.value {
-        Some((_id, state)) => Ok(state.sequence),
-        None => Err(anyhow!("cannot find actor {addr}")),
+        Some((_id, state)) => Ok(Some(state.sequence)),
+        None => Ok(None),
+    }
+}
+
+fn encode_register_node_operator_calldata(params: &RegisterNodeOperatorParams) -> Bytes {
+    let args = abi_encode(&[
+        AbiToken::Bytes(params.bls_pubkey.clone()),
+        AbiToken::String(params.rpc_url.clone()),
+    ]);
+    let mut calldata = Vec::with_capacity(4 + args.len());
+    calldata.extend_from_slice(&REGISTER_NODE_OPERATOR_SELECTOR);
+    calldata.extend_from_slice(&args);
+    Bytes::from(calldata)
+}
+
+fn encode_claim_job_calldata(id: u64) -> Bytes {
+    let args = abi_encode(&[AbiToken::Uint(EthU256::from(id))]);
+    let mut calldata = Vec::with_capacity(4 + args.len());
+    calldata.extend_from_slice(&CLAIM_JOB_SELECTOR);
+    calldata.extend_from_slice(&args);
+    Bytes::from(calldata)
+}
+
+fn encode_complete_job_calldata(
+    id: u64,
+    output_refs: Vec<String>,
+    output_commitment: [u8; 32],
+    exit_code: i32,
+) -> Bytes {
+    let args = abi_encode(&[
+        AbiToken::Uint(EthU256::from(id)),
+        AbiToken::Array(output_refs.into_iter().map(AbiToken::String).collect()),
+        AbiToken::FixedBytes(output_commitment.to_vec()),
+        AbiToken::Int(abi_int256_from_i32(exit_code)),
+    ]);
+    let mut calldata = Vec::with_capacity(4 + args.len());
+    calldata.extend_from_slice(&COMPLETE_JOB_SELECTOR);
+    calldata.extend_from_slice(&args);
+    Bytes::from(calldata)
+}
+
+fn encode_fail_job_calldata(id: u64, reason: String, exit_code: i32) -> Bytes {
+    let args = abi_encode(&[
+        AbiToken::Uint(EthU256::from(id)),
+        AbiToken::String(reason),
+        AbiToken::Int(abi_int256_from_i32(exit_code)),
+    ]);
+    let mut calldata = Vec::with_capacity(4 + args.len());
+    calldata.extend_from_slice(&FAIL_JOB_SELECTOR);
+    calldata.extend_from_slice(&args);
+ Bytes::from(calldata) +} + +fn abi_int256_from_i32(value: i32) -> EthU256 { + if value >= 0 { + EthU256::from(value as u32) + } else { + EthU256::MAX - EthU256::from((-value) as u32) + EthU256::from(1u8) } } @@ -599,3 +700,538 @@ async fn query_object(args: QueryObjectArgs) -> Result<()> { Ok(()) } + +async fn run_executor(args: RunExecutorArgs) -> Result<()> { + info!("Starting execution worker loop"); + + let rpc_url = Url::from_str(&args.rpc_url).context("failed to parse RPC URL")?; + let client = FendermintClient::new_http(rpc_url, None).context("failed to create client")?; + + let sk = SignedMessageFactory::read_secret_key(&args.secret_key_file) + .context("failed to read secret key")?; + let pk = sk.public_key(); + let from_eth = FvmEthAddress::new_secp256k1(&pk.serialize()) + .context("failed to derive delegated address from secret key")?; + let from_f410 = + Address::new_delegated(10, &from_eth.0).context("failed to create f410 address")?; + + let chain_id = client + .state_params(FvmQueryHeight::default()) + .await + .context("failed to get state params")? + .value + .chain_id; + + let sequence = get_sequence_opt(&client, &from_f410) + .await + .context("failed to get delegated account sequence")? 
+ .ok_or_else(|| { + anyhow::anyhow!( + "delegated sender {} not found on-chain; cross-fund this delegated address and retry", + from_f410 + ) + })?; + + info!("Executor sender: {}", from_f410); + info!("Executor chain ID: {}", chain_id); + info!("Executor sequence: {}", sequence); + + let mf = SignedMessageFactory::new(sk, from_f410, sequence, ChainID::from(chain_id)); + let mut tx_client = client.bind(mf); + + let poll_interval = Duration::from_secs(args.poll_interval_secs); + + loop { + let processed = process_pending_jobs( + &mut tx_client, + &from_f410, + args.gateway_url.as_deref(), + &args.rpc_url, + ) + .await; + + match processed { + Ok(0) => { + tokio::time::sleep(poll_interval).await; + } + Ok(n) => { + info!("Processed {} job(s)", n); + } + Err(e) => { + tracing::error!("Executor tick error: {:#}", e); + if let Err(sync_err) = resync_sequence(&mut tx_client, &from_f410).await { + tracing::error!("Failed to resync sequence after error: {:#}", sync_err); + } + tokio::time::sleep(poll_interval).await; + } + } + } +} + +const MAX_TX_RETRIES: u32 = 3; + +/// Re-query the on-chain nonce and update the local message factory. +async fn resync_sequence( + tx_client: &mut (impl BoundClient + QueryClient), + sender: &Address, +) -> Result<()> { + let state = tx_client + .actor_state(sender, FvmQueryHeight::default()) + .await + .context("failed to query actor state for sequence resync")?; + let sequence = state + .value + .map(|(_, s)| s.sequence) + .ok_or_else(|| anyhow::anyhow!("sender {} not found during sequence resync", sender))?; + tx_client.message_factory_mut().set_sequence(sequence); + info!("Resynced sequence to {}", sequence); + Ok(()) +} + +/// Send a transaction to the blobs actor with retry and automatic sequence resync. +/// +/// Returns `Ok(true)` if the transaction was delivered successfully (deliver_tx ok), +/// `Ok(false)` if deliver_tx rejected it (nonce was consumed, move on), +/// `Err` only on unrecoverable failures. 
+async fn send_executor_tx( + tx_client: &mut (impl BoundClient + QueryClient + TxClient), + sender: &Address, + calldata: Bytes, + label: &str, +) -> Result { + let gas_params = GasParams { + gas_limit: 10_000_000, + gas_fee_cap: TokenAmount::from_atto(1_000_000), + gas_premium: TokenAmount::from_atto(100_000), + }; + + for attempt in 0..MAX_TX_RETRIES { + let res = TxClient::::fevm_invoke( + tx_client, + BLOBS_ACTOR_ADDR, + calldata.clone(), + TokenAmount::zero(), + gas_params.clone(), + ) + .await; + + match res { + Ok(commit_res) => { + if commit_res.response.check_tx.code.is_err() { + // check_tx rejection: nonce NOT consumed on-chain but WAS incremented locally. + let log = &commit_res.response.check_tx.log; + tracing::warn!( + "{} check_tx rejected (attempt {}): code={:?} log={}", + label, + attempt + 1, + commit_res.response.check_tx.code, + if log.is_empty() { "" } else { log.as_str() }, + ); + resync_sequence(tx_client, sender).await?; + tokio::time::sleep(Duration::from_secs(1 << attempt)).await; + continue; + } + if commit_res.response.deliver_tx.code.is_err() { + // deliver_tx failure: nonce WAS consumed, sequence is correct. Not retryable. + let log = &commit_res.response.deliver_tx.log; + let info_str = &commit_res.response.deliver_tx.info; + tracing::warn!( + "{} deliver_tx failed: code={:?} log={} info={}", + label, + commit_res.response.deliver_tx.code, + if log.is_empty() { "" } else { log.as_str() }, + if info_str.is_empty() { "" } else { info_str.as_str() }, + ); + return Ok(false); + } + return Ok(true); + } + Err(e) => { + // Network/transport error: sequence state is unknown. 
+ tracing::warn!( + "{} network error (attempt {}): {:#}", + label, + attempt + 1, + e + ); + resync_sequence(tx_client, sender).await?; + tokio::time::sleep(Duration::from_secs(1 << attempt)).await; + } + } + } + + anyhow::bail!("{} failed after {} retries", label, MAX_TX_RETRIES) +} + +async fn process_pending_jobs( + tx_client: &mut (impl BoundClient + QueryClient + TxClient), + sender: &Address, + gateway_url: Option<&str>, + rpc_url: &str, +) -> Result { + let pending_jobs = list_pending_jobs(tx_client).await?; + if pending_jobs.is_empty() { + return Ok(0); + } + + let mut processed = 0; + + for job in &pending_jobs { + info!( + "Found candidate job {} binary_ref={} args={:?}", + job.id, job.binary_ref, job.args + ); + + // Re-check the job is still pending (another executor may have claimed it). + let latest = get_job(tx_client, job.id).await?; + let Some(latest) = latest else { + info!("Skipping job {}: no longer exists", job.id); + continue; + }; + if latest.status != JobStatus::Pending { + info!( + "Skipping job {}: status is {:?}", + latest.id, latest.status + ); + continue; + } + + // --- Claim --- + let claimed = send_executor_tx( + tx_client, + sender, + encode_claim_job_calldata(job.id), + &format!("ClaimJob({})", job.id), + ) + .await?; + + if !claimed { + info!("Job {} could not be claimed, skipping", job.id); + continue; + } + info!("Claimed job {}", job.id); + + // --- Execute --- + let run_result = execute_job(job, gateway_url, rpc_url).await; + + match run_result { + Ok((exit_code, stdout, stderr)) => { + // Always print job output. 
+                if !stdout.is_empty() {
+                    info!("Job {} stdout:\n{}", job.id, stdout);
+                }
+                if !stderr.is_empty() {
+                    info!("Job {} stderr:\n{}", job.id, stderr);
+                }
+                info!("Job {} exited with code {}", job.id, exit_code);
+
+                if exit_code == 0 {
+                    let combined = [stdout.as_bytes(), stderr.as_bytes()].concat();
+                    let output_commitment =
+                        fendermint_actor_blobs_shared::bytes::B256(*blake3::hash(&combined).as_bytes());
+                    let output_refs =
+                        vec![format!("inline://stdout/{}", hex::encode(output_commitment.0))];
+
+                    let ok = send_executor_tx(
+                        tx_client,
+                        sender,
+                        encode_complete_job_calldata(
+                            job.id,
+                            output_refs,
+                            output_commitment.0,
+                            exit_code,
+                        ),
+                        &format!("CompleteJob({})", job.id),
+                    )
+                    .await?;
+
+                    if ok {
+                        info!("Job {} completed successfully", job.id);
+                    } else {
+                        tracing::warn!("CompleteJob deliver_tx rejected for job {}", job.id);
+                    }
+                } else {
+                    let reason =
+                        format!("process exited with code {}: {}", exit_code, truncate(&stderr, 512));
+                    let ok = send_executor_tx(
+                        tx_client,
+                        sender,
+                        encode_fail_job_calldata(job.id, reason, exit_code),
+                        &format!("FailJob({})", job.id),
+                    )
+                    .await?;
+
+                    if ok {
+                        info!("Job {} reported as failed (exit code {})", job.id, exit_code);
+                    }
+                }
+            }
+            Err(e) => {
+                tracing::error!("Job {} execution error: {:#}", job.id, e);
+                let reason = format!("execution error: {}", truncate(&e.to_string(), 512));
+                let _ = send_executor_tx(
+                    tx_client,
+                    sender,
+                    encode_fail_job_calldata(job.id, reason, -1),
+                    &format!("FailJob({})", job.id),
+                )
+                .await;
+            }
+        }
+
+        processed += 1;
+    }
+
+    Ok(processed)
+}
+
+fn truncate(s: &str, max: usize) -> String {
+    if s.len() <= max {
+        s.to_string()
+    } else {
+        format!("{}...(truncated)", &s[..(0..=max).rev().find(|&i| s.is_char_boundary(i)).unwrap_or(0)]) // clamp to a UTF-8 char boundary: slicing at a raw byte index panics mid-codepoint (stderr comes via from_utf8_lossy and may contain multi-byte U+FFFD)
+    }
+}
+
+/// Execute a job: download ipc:// inputs, run binary with env/timeout, upload outputs.
+async fn execute_job( + job: &ExecutionJob, + gateway_url: Option<&str>, + _rpc_url: &str, +) -> Result<(i32, String, String)> { + let work_dir = tempfile::tempdir().context("failed to create temp working directory")?; + let input_dir = work_dir.path().join("input"); + let output_dir = work_dir.path().join("output"); + std::fs::create_dir_all(&input_dir)?; + std::fs::create_dir_all(&output_dir)?; + + let mut env_vars: Vec<(String, String)> = job.env.clone(); + + // Download ipc:// input files. + let http_client = reqwest::Client::new(); + for (i, input_ref) in job.input_refs.iter().enumerate() { + if input_ref.starts_with("ipc://") { + let gw = gateway_url.ok_or_else(|| { + anyhow::anyhow!( + "Job {} has ipc:// input {} but no --gateway-url configured", + job.id, + input_ref + ) + })?; + + let (bucket, key) = parse_ipc_uri(input_ref)?; + let file_name = key.rsplit('/').next().unwrap_or(&key); + let local_path = input_dir.join(file_name); + + info!("Downloading input {} -> {}", input_ref, local_path.display()); + let url = format!( + "{}/v1/objects/{}/{}", + gw.trim_end_matches('/'), + bucket, + urlencoding::encode(&key) + ); + let resp = http_client + .get(&url) + .send() + .await + .with_context(|| format!("failed to download {}", input_ref))?; + if !resp.status().is_success() { + anyhow::bail!( + "Gateway returned {} downloading {}", + resp.status(), + input_ref + ); + } + let data = resp.bytes().await?; + std::fs::write(&local_path, &data)?; + + env_vars.push((format!("IPC_INPUT_{}", i), local_path.to_string_lossy().to_string())); + } else { + env_vars.push((format!("IPC_INPUT_{}", i), input_ref.clone())); + } + } + + // Prepare output file paths for any IPC_OUTPUT_N env vars. 
+ let mut output_uploads: Vec<(String, PathBuf)> = Vec::new(); + for (key, value) in &env_vars { + if key.starts_with("IPC_OUTPUT_") && key != "IPC_OUTPUT_DIR" && value.starts_with("ipc://") { + let idx = key.trim_start_matches("IPC_OUTPUT_"); + let local_out = output_dir.join(format!("output_{}", idx)); + output_uploads.push((value.clone(), local_out.clone())); + } + } + + // Set IPC_OUTPUT_FILE_N vars pointing to writable local paths, + // and IPC_OUTPUT_DIR for convenience. + for (i, (_, local_path)) in output_uploads.iter().enumerate() { + env_vars.push(( + format!("IPC_OUTPUT_FILE_{}", i), + local_path.to_string_lossy().to_string(), + )); + } + env_vars.push(("IPC_OUTPUT_DIR".to_string(), output_dir.to_string_lossy().to_string())); + + // Resolve binary. + let binary = job + .binary_ref + .strip_prefix("local://") + .unwrap_or(&job.binary_ref) + .to_string(); + + let timeout = if job.timeout_secs > 0 { + Duration::from_secs(job.timeout_secs) + } else { + Duration::from_secs(300) + }; + + info!( + "Executing: {} {:?} (timeout {}s)", + binary, + job.args, + timeout.as_secs() + ); + + let child_fut = TokioCommand::new(&binary) + .args(&job.args) + .envs(env_vars.iter().map(|(k, v)| (k.as_str(), v.as_str()))) + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .output(); + + let output = tokio::time::timeout(timeout, child_fut) + .await + .map_err(|_| anyhow::anyhow!("job timed out after {}s", timeout.as_secs()))? + .context("failed to spawn/run process")?; + + let code = output.status.code().unwrap_or(-1); + let stdout = String::from_utf8_lossy(&output.stdout).to_string(); + let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + + // Upload output files to ipc:// destinations if job succeeded. 
+ if code == 0 && !output_uploads.is_empty() { + if let Some(gw) = gateway_url { + for (ipc_uri, local_path) in &output_uploads { + if !local_path.exists() { + tracing::warn!( + "Output file {} not created by job, skipping upload to {}", + local_path.display(), + ipc_uri + ); + continue; + } + info!("Uploading output {} -> {}", local_path.display(), ipc_uri); + let data = std::fs::read(local_path) + .with_context(|| format!("failed to read output {}", local_path.display()))?; + + let form = reqwest::multipart::Form::new() + .text("size", data.len().to_string()) + .part( + "data", + reqwest::multipart::Part::bytes(data) + .file_name("upload") + .mime_str("application/octet-stream")?, + ); + let url = format!("{}/v1/objects", gw.trim_end_matches('/')); + let resp = http_client.post(&url).multipart(form).send().await?; + if resp.status().is_success() { + info!("Uploaded output to gateway for {}", ipc_uri); + } else { + let status = resp.status(); + let body = resp.text().await.unwrap_or_default(); + tracing::warn!( + "Failed to upload output to {}: {} {}", + ipc_uri, + status, + body + ); + } + } + } else { + tracing::warn!( + "Job produced output files but no --gateway-url configured; skipping upload" + ); + } + } + + Ok((code, stdout, stderr)) +} + +fn parse_ipc_uri(uri: &str) -> Result<(String, String)> { + let path = uri + .strip_prefix("ipc://") + .ok_or_else(|| anyhow::anyhow!("not an ipc:// URI: {}", uri))?; + let (bucket, key) = path.split_once('/').ok_or_else(|| { + anyhow::anyhow!("ipc:// URI must include a key: {}", uri) + })?; + Ok((bucket.to_string(), key.to_string())) +} + +async fn list_pending_jobs(client: &impl QueryClient) -> Result> { + let params = ListJobsParams { + status: Some(JobStatus::Pending), + limit: 50, + }; + let params = RawBytes::serialize(params).context("failed to serialize ListJobs params")?; + + let msg = Message { + version: Default::default(), + from: system::SYSTEM_ACTOR_ADDR, + to: BLOBS_ACTOR_ADDR, + sequence: 0, + value: 
TokenAmount::zero(), + method_num: Method::ListJobs as u64, + params, + gas_limit: 10_000_000_000, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + + let response = client + .call(msg, FvmQueryHeight::default()) + .await + .context("failed to execute ListJobs query")?; + if response.value.code.is_err() { + anyhow::bail!("ListJobs query failed: {}", response.value.info); + } + let return_data = fendermint_rpc::response::decode_data(&response.value.data) + .context("failed to decode ListJobs response data")?; + let jobs = fvm_ipld_encoding::from_slice::(&return_data) + .context("failed to decode ListJobs return type")?; + // Double-check client-side in case the actor ignores the status filter. + Ok(jobs + .jobs + .into_iter() + .filter(|j| j.status == JobStatus::Pending) + .collect()) +} + +async fn get_job(client: &impl QueryClient, id: u64) -> Result> { + let params = + RawBytes::serialize(GetJobParams { id }).context("failed to serialize GetJob params")?; + + let msg = Message { + version: Default::default(), + from: system::SYSTEM_ACTOR_ADDR, + to: BLOBS_ACTOR_ADDR, + sequence: 0, + value: TokenAmount::zero(), + method_num: Method::GetJob as u64, + params, + gas_limit: 10_000_000_000, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + + let response = client + .call(msg, FvmQueryHeight::default()) + .await + .context("failed to execute GetJob query")?; + if response.value.code.is_err() { + anyhow::bail!("GetJob query failed: {}", response.value.info); + } + let return_data = fendermint_rpc::response::decode_data(&response.value.data) + .context("failed to decode GetJob response data")?; + let job = fvm_ipld_encoding::from_slice::>(&return_data) + .context("failed to decode GetJob return type")?; + Ok(job) +} diff --git a/ipc-storage/ipc-decentralized-storage/src/gateway/mod.rs b/ipc-storage/ipc-decentralized-storage/src/gateway/mod.rs index db1a163784..c3592f81b5 100644 --- 
a/ipc-storage/ipc-decentralized-storage/src/gateway/mod.rs +++ b/ipc-storage/ipc-decentralized-storage/src/gateway/mod.rs @@ -10,12 +10,15 @@ pub mod objects_service; use anyhow::{Context, Result}; use bls_signatures::{aggregate, Serialize as BlsSerialize, Signature as BlsSignature}; +use bytes::Bytes; +use ethers::abi::{encode as abi_encode, Token as AbiToken}; +use ethers::types::U256 as EthU256; use fendermint_actor_blobs_shared::blobs::{ - BlobStatus, FinalizeBlobParams, GetAddedBlobsParams, SubscriptionId, + BlobStatus, GetAddedBlobsParams, SubscriptionId, }; use fendermint_actor_blobs_shared::bytes::B256; use fendermint_actor_blobs_shared::method::Method::{ - FinalizeBlob, GetActiveOperators, GetAddedBlobs, GetOperatorInfo, + GetActiveOperators, GetAddedBlobs, GetOperatorInfo, }; use fendermint_actor_blobs_shared::operators::{ GetActiveOperatorsReturn, GetOperatorInfoParams, OperatorInfo, @@ -23,6 +26,7 @@ use fendermint_actor_blobs_shared::operators::{ use fendermint_actor_blobs_shared::BLOBS_ACTOR_ADDR; use fendermint_rpc::message::GasParams; use fendermint_rpc::tx::{BoundClient, TxClient, TxCommit}; +use fendermint_vm_actor_interface::eam::EthAddress as FvmEthAddress; use fendermint_vm_actor_interface::system; use fendermint_vm_message::query::FvmQueryHeight; use fvm_ipld_encoding::RawBytes; @@ -35,6 +39,24 @@ use std::time::{Duration, Instant}; use tokio::time::sleep; use tracing::{debug, error, info, warn}; +/// keccak256("finalizeBlob(bytes32,address,bytes32,uint64,string,uint8,bytes,uint128)") +const FINALIZE_BLOB_SELECTOR: [u8; 4] = [0xf6, 0x94, 0x17, 0x21]; + +/// Convert an FVM Address to a 20-byte Ethereum address (H160) for ABI encoding. 
+fn fvm_addr_to_h160(addr: &Address) -> Result { + use fvm_shared::address::Payload; + match addr.payload() { + Payload::Delegated(d) if d.namespace() == 10 && d.subaddress().len() == 20 => { + Ok(ethers::types::H160::from_slice(d.subaddress())) + } + Payload::ID(id) => { + let eth = FvmEthAddress::from_id(*id); + Ok(ethers::types::H160::from_slice(ð.0)) + } + _ => anyhow::bail!("cannot convert address {} to Ethereum H160", addr), + } +} + /// A blob item with its hash, size, and subscribers /// Note: We use B256 for both hash and source to match the actor's return type exactly. /// The actor returns BlobRequest = (B256, u64, HashSet<(Address, SubscriptionId, B256)>) @@ -151,8 +173,8 @@ fn assigned_operator_indices( fn default_gas_params() -> GasParams { GasParams { gas_limit: 10_000_000_000, - gas_fee_cap: TokenAmount::from_atto(100), - gas_premium: TokenAmount::from_atto(100), + gas_fee_cap: TokenAmount::from_atto(1_000_000), + gas_premium: TokenAmount::from_atto(100_000), } } @@ -498,7 +520,7 @@ where info!("Blob {} finalized on-chain and removed from tracking", hash); } Err(e) => { - warn!("Failed to finalize blob {} on-chain: {}", hash, e); + warn!("Failed to finalize blob {} on-chain: {:#}", hash, e); // Keep in tracking to retry later } } @@ -753,9 +775,10 @@ impl BlobGateway where C: fendermint_rpc::QueryClient + BoundClient + TxClient + Send + Sync, { - /// Call finalize_blob on-chain with aggregated signature and bitmap + /// Call finalize_blob on-chain via the EVM facade (InvokeContract). /// - /// This submits a real transaction to the blockchain (not just a query). + /// This encodes the parameters as ABI calldata and uses `fevm_invoke` so that + /// the transaction works with both f1 (native) and f410 (delegated) sender addresses. 
pub async fn finalize_blob( &mut self, blob_hash: B256, @@ -765,47 +788,66 @@ where ) -> Result<()> { info!("Finalizing blob {} on-chain", blob_hash); - // Serialize aggregated signature let signature_bytes = aggregated_signature.as_bytes().to_vec(); - // Create finalize blob params - let params = FinalizeBlobParams { - source: metadata.source, - subscriber: metadata.subscriber, - hash: blob_hash, - size: metadata.size, - id: metadata.subscription_id.clone(), - status: BlobStatus::Resolved, - aggregated_signature: signature_bytes, + let subscriber_h160 = fvm_addr_to_h160(&metadata.subscriber) + .context("failed to convert subscriber to Ethereum address")?; + + let status_u8: u8 = blob_status_to_u8(BlobStatus::Resolved); + + let calldata = encode_finalize_blob_calldata( + &metadata.source, + subscriber_h160, + &blob_hash, + metadata.size, + &String::from(metadata.subscription_id.clone()), + status_u8, + &signature_bytes, signer_bitmap, - }; + ); + + debug!("FinalizeBlob ABI calldata ({} bytes)", calldata.len()); - let params_bytes = - RawBytes::serialize(params).context("failed to serialize FinalizeBlobParams")?; + let gas = default_gas_params(); + debug!( + "Gas params: limit={}, fee_cap={}, premium={}", + gas.gas_limit, gas.gas_fee_cap, gas.gas_premium + ); - // Submit actual transaction using TxClient - let res = TxClient::::transaction( + let res = match TxClient::::fevm_invoke( &mut self.client, BLOBS_ACTOR_ADDR, - FinalizeBlob as u64, - params_bytes, + calldata, TokenAmount::zero(), - default_gas_params(), + gas, ) .await - .context("failed to send FinalizeBlob transaction")?; + { + Ok(res) => res, + Err(e) => { + error!( + "FinalizeBlob fevm_invoke failed for {}: {:?}", + blob_hash, e + ); + return Err(e).context("failed to send FinalizeBlob transaction"); + } + }; if res.response.check_tx.code.is_err() { anyhow::bail!( - "FinalizeBlob check_tx failed: {}", - res.response.check_tx.log + "FinalizeBlob check_tx failed (code {:?}): log={} info={}", + 
res.response.check_tx.code, + res.response.check_tx.log, + res.response.check_tx.info, ); } if res.response.deliver_tx.code.is_err() { anyhow::bail!( - "FinalizeBlob deliver_tx failed: {}", - res.response.deliver_tx.log + "FinalizeBlob deliver_tx failed (code {:?}): log={} info={}", + res.response.deliver_tx.code, + res.response.deliver_tx.log, + res.response.deliver_tx.info, ); } @@ -816,3 +858,38 @@ where Ok(()) } } + +fn blob_status_to_u8(status: BlobStatus) -> u8 { + match status { + BlobStatus::Added => 0, + BlobStatus::Pending => 1, + BlobStatus::Resolved => 2, + BlobStatus::Failed => 3, + } +} + +fn encode_finalize_blob_calldata( + source: &B256, + subscriber: ethers::types::H160, + blob_hash: &B256, + size: u64, + subscription_id: &str, + status: u8, + aggregated_signature: &[u8], + signer_bitmap: u128, +) -> Bytes { + let args = abi_encode(&[ + AbiToken::FixedBytes(source.0.to_vec()), + AbiToken::Address(subscriber), + AbiToken::FixedBytes(blob_hash.0.to_vec()), + AbiToken::Uint(EthU256::from(size)), + AbiToken::String(subscription_id.to_string()), + AbiToken::Uint(EthU256::from(status)), + AbiToken::Bytes(aggregated_signature.to_vec()), + AbiToken::Uint(EthU256::from(signer_bitmap)), + ]); + let mut calldata = Vec::with_capacity(4 + args.len()); + calldata.extend_from_slice(&FINALIZE_BLOB_SELECTOR); + calldata.extend_from_slice(&args); + Bytes::from(calldata) +} diff --git a/ipc/cli/Cargo.toml b/ipc/cli/Cargo.toml index f0ac0133e9..35cbd50ec1 100644 --- a/ipc/cli/Cargo.toml +++ b/ipc/cli/Cargo.toml @@ -4,6 +4,10 @@ version = "0.1.0" edition.workspace = true license-file.workspace = true +[features] +default = [] +ipc-storage = ["fendermint_app/ipc-storage"] + # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [build-dependencies] @@ -40,7 +44,7 @@ num-derive = "0.3.3" num-bigint = { workspace = true } num-traits = { workspace = true } openssl = { workspace = true } -reqwest = { workspace = true } +reqwest = { 
workspace = true, features = ["multipart"] } serde = { workspace = true } serde_bytes = "0.11.9" serde_json = { workspace = true } @@ -68,7 +72,7 @@ uuid = { version = "1.0", features = ["v4"] } mime_guess = "2.0" include_dir = "0.7" fendermint_eth_api = { path = "../../fendermint/eth/api" } -fendermint_rpc = { path = "../../fendermint/rpc" } +fendermint_rpc = { path = "../../fendermint/rpc", features = ["ipc-storage"] } tendermint-rpc = { workspace = true } ipc-wallet = { path = "../../ipc/wallet", features = ["with-ethers"] } @@ -82,8 +86,13 @@ fendermint_eth_hardhat = { path = "../../fendermint/eth/hardhat" } fendermint_eth_deployer = { path = "../../fendermint/eth/deployer" } fendermint_app_settings = { path = "../../fendermint/app/settings" } fendermint_vm_actor_interface = { path = "../../fendermint/vm/actor_interface" } +fendermint_vm_message = { path = "../../fendermint/vm/message" } +fendermint_actor_blobs_shared = { path = "../../fendermint/actors/blobs/shared" } +fendermint_actor_bucket = { path = "../../fendermint/actors/bucket" } fendermint_app = { path = "../../fendermint/app" } fendermint_crypto = { path = "../../fendermint/crypto" } ipc_ipld_resolver = { path = "../../ipld/resolver" } contracts-artifacts = { path = "../../contracts-artifacts" } ipc_actors_abis = { path = "../../contract-bindings" } +walkdir = "2.4" +dirs = "5.0" diff --git a/ipc/cli/src/commands/exec/mod.rs b/ipc/cli/src/commands/exec/mod.rs new file mode 100644 index 0000000000..6fcb501442 --- /dev/null +++ b/ipc/cli/src/commands/exec/mod.rs @@ -0,0 +1,412 @@ +// Copyright 2026 Recall Contributors +// SPDX-License-Identifier: MIT + +use anyhow::{Context, Result}; +use clap::{Args, Subcommand, ValueEnum}; +use fendermint_actor_blobs_shared::execution::{ + GetJobParams, JobStatus, ListJobsParams, ListJobsReturn, CREATE_JOB_SELECTOR, +}; +use fendermint_actor_blobs_shared::method::Method; +use fendermint_actor_blobs_shared::BLOBS_ACTOR_ADDR; +use ethers::abi::{encode as abi_encode, 
Token}; +use ethers::types::U256 as EthU256; +use fendermint_rpc::message::{GasParams, SignedMessageFactory}; +use fendermint_rpc::tx::{TxClient, TxCommit}; +use fendermint_rpc::{FendermintClient, QueryClient}; +use fendermint_vm_actor_interface::eam::EthAddress as FvmEthAddress; +use fendermint_vm_actor_interface::system; +use fendermint_vm_message::query::FvmQueryHeight; +use fvm_ipld_encoding::RawBytes; +use fvm_shared::address::Address; +use fvm_shared::bigint::Zero; +use fvm_shared::chainid::ChainID; +use fvm_shared::econ::TokenAmount; +use fvm_shared::message::Message; +use std::path::PathBuf; +use std::str::FromStr; +use tendermint_rpc::Url; + +use crate::GlobalArguments; + +#[derive(Debug, Args)] +#[command(name = "exec", about = "execution job commands")] +#[command(args_conflicts_with_subcommands = true)] +pub(crate) struct ExecCommandsArgs { + #[command(subcommand)] + command: Commands, +} + +impl ExecCommandsArgs { + pub async fn handle(&self, _global: &GlobalArguments) -> anyhow::Result<()> { + match &self.command { + Commands::Submit(args) => submit_job(args).await, + Commands::List(args) => list_jobs(args).await, + Commands::Status(args) => status_job(args).await, + } + } +} + +#[derive(Debug, Subcommand)] +pub(crate) enum Commands { + Submit(SubmitJobArgs), + List(ListJobsArgs), + Status(StatusJobArgs), +} + +#[derive(Debug, Clone, ValueEnum)] +enum JobStatusArg { + Pending, + Claimed, + Running, + Succeeded, + Failed, + TimedOut, +} + +impl From for JobStatus { + fn from(value: JobStatusArg) -> Self { + match value { + JobStatusArg::Pending => JobStatus::Pending, + JobStatusArg::Claimed => JobStatus::Claimed, + JobStatusArg::Running => JobStatus::Running, + JobStatusArg::Succeeded => JobStatus::Succeeded, + JobStatusArg::Failed => JobStatus::Failed, + JobStatusArg::TimedOut => JobStatus::TimedOut, + } + } +} + +#[derive(Debug, Args)] +pub(crate) struct SubmitJobArgs { + #[arg(long, default_value = "http://localhost:26657")] + rpc_url: String, + 
#[arg(long, env = "SECRET_KEY_FILE", required = true)] + secret_key_file: PathBuf, + #[arg(long)] + binary_ref: String, + #[arg(long = "input-ref", help = "Raw input reference (passed as-is to input_refs)")] + input_refs: Vec, + /// ipc:// URI for input file (e.g. ipc://t0123/data.txt). Executor downloads + /// the file and sets IPC_INPUT_N env var pointing to the local path. + #[arg(long = "input")] + inputs: Vec, + /// ipc:// URI for output destination (e.g. ipc://t0123/result.txt). Executor + /// uploads the file written to IPC_OUTPUT_FILE_N after the job completes. + #[arg(long = "output")] + outputs: Vec, + #[arg(long = "arg")] + args: Vec, + #[arg(long = "env", help = "Extra env var in KEY=VALUE format")] + env: Vec, + #[arg(long, default_value = "300")] + timeout_secs: u64, +} + +#[derive(Debug, Args)] +pub(crate) struct ListJobsArgs { + #[arg(long, default_value = "http://localhost:26657")] + rpc_url: String, + #[arg(long)] + status: Option, + #[arg(long, default_value = "20")] + limit: u32, +} + +#[derive(Debug, Args)] +pub(crate) struct StatusJobArgs { + #[arg(long, default_value = "http://localhost:26657")] + rpc_url: String, + #[arg(long)] + id: u64, +} + +async fn submit_job(args: &SubmitJobArgs) -> Result<()> { + let rpc_url = Url::from_str(&args.rpc_url).context("failed to parse RPC URL")?; + let client = FendermintClient::new_http(rpc_url, None).context("failed to create client")?; + + let sk = SignedMessageFactory::read_secret_key(&args.secret_key_file) + .context("failed to read secret key")?; + let pk = sk.public_key(); + let from_eth = FvmEthAddress::new_secp256k1(&pk.serialize()) + .context("failed to derive delegated address from secret key")?; + let from_f410 = + Address::new_delegated(10, &from_eth.0).context("failed to create f410 address")?; + + let sequence = get_sequence_opt(&client, &from_f410) + .await + .context("failed to get delegated account sequence")? 
+ .ok_or_else(|| { + anyhow::anyhow!( + "delegated sender {} not found on-chain; fund/initialize delegated account", + from_f410, + ) + })?; + let chain_id = client + .state_params(FvmQueryHeight::default()) + .await + .context("failed to get state params")? + .value + .chain_id; + + let mf = SignedMessageFactory::new(sk, from_f410, sequence, ChainID::from(chain_id)); + let mut tx_client = client.bind(mf); + + // Merge --input-ref and --input into input_refs. --input values are ipc:// URIs + // that the executor will download before running the binary. + let mut all_input_refs = args.input_refs.clone(); + all_input_refs.extend(args.inputs.iter().cloned()); + + // Build env: explicit --env pairs + IPC_OUTPUT_N entries from --output flags. + let mut env_pairs = parse_env_pairs(&args.env)?; + for (i, output_uri) in args.outputs.iter().enumerate() { + env_pairs.push((format!("IPC_OUTPUT_{}", i), output_uri.clone())); + } + + let calldata = encode_create_job_calldata( + &args.binary_ref, + &all_input_refs, + &args.args, + &env_pairs, + args.timeout_secs, + ); + + let gas_params = GasParams { + gas_limit: 10_000_000, + gas_fee_cap: TokenAmount::from_atto(1_000_000), + gas_premium: TokenAmount::from_atto(100_000), + }; + let res = TxClient::::fevm_invoke( + &mut tx_client, + BLOBS_ACTOR_ADDR, + calldata.into(), + TokenAmount::zero(), + gas_params, + ) + .await + .context("failed to send CreateJob transaction")?; + + if res.response.check_tx.code.is_err() { + let log = &res.response.check_tx.log; + anyhow::bail!( + "CreateJob check_tx rejected: code={:?} log={}", + res.response.check_tx.code, + if log.is_empty() { "" } else { log.as_str() }, + ); + } + if res.response.deliver_tx.code.is_err() { + let log = &res.response.deliver_tx.log; + let info = &res.response.deliver_tx.info; + anyhow::bail!( + "CreateJob deliver_tx failed: code={:?} log={} info={}", + res.response.deliver_tx.code, + if log.is_empty() { "" } else { log.as_str() }, + if info.is_empty() { "" } else { 
info.as_str() }, + ); + } + + println!("Job submitted successfully"); + println!(" tx_hash: {}", res.response.hash); + if !all_input_refs.is_empty() { + println!(" inputs:"); + for r in &all_input_refs { + println!(" - {}", r); + } + } + if !args.outputs.is_empty() { + println!(" outputs:"); + for o in &args.outputs { + println!(" - {}", o); + } + } + Ok(()) +} + +async fn list_jobs(args: &ListJobsArgs) -> Result<()> { + let rpc_url = Url::from_str(&args.rpc_url).context("failed to parse RPC URL")?; + let client = FendermintClient::new_http(rpc_url, None).context("failed to create client")?; + + let params = ListJobsParams { + status: args.status.clone().map(Into::into), + limit: args.limit, + }; + let jobs = query_list_jobs(&client, params).await?; + println!("Found {} jobs", jobs.jobs.len()); + for job in jobs.jobs { + println!( + "- id={} status={:?} creator={} claimed_by={}", + job.id, + job.status, + job.creator, + job.claimed_by + .map(|a| a.to_string()) + .unwrap_or_else(|| "-".to_string()) + ); + } + Ok(()) +} + +async fn status_job(args: &StatusJobArgs) -> Result<()> { + let rpc_url = Url::from_str(&args.rpc_url).context("failed to parse RPC URL")?; + let client = FendermintClient::new_http(rpc_url, None).context("failed to create client")?; + let maybe = query_get_job(&client, args.id).await?; + match maybe { + Some(job) => { + println!("Job {}", job.id); + println!(" status: {:?}", job.status); + println!(" creator: {}", job.creator); + println!( + " claimed_by: {}", + job.claimed_by + .map(|a| a.to_string()) + .unwrap_or_else(|| "-".to_string()) + ); + println!(" binary_ref: {}", job.binary_ref); + if !job.args.is_empty() { + println!(" args: {:?}", job.args); + } + if !job.env.is_empty() { + println!(" env:"); + for (k, v) in &job.env { + println!(" {}={}", k, v); + } + } + println!(" timeout: {}s", job.timeout_secs); + println!(" created: epoch {}", job.created_epoch); + if let Some(ep) = job.started_epoch { + println!(" started: epoch {}", ep); + } + 
if let Some(ep) = job.completed_epoch { + println!(" completed: epoch {}", ep); + } + if !job.input_refs.is_empty() { + println!(" input_refs:"); + for r in &job.input_refs { + println!(" - {}", r); + } + } + if !job.output_refs.is_empty() { + println!(" output_refs:"); + for r in &job.output_refs { + println!(" - {}", r); + } + } + if let Some(hash) = &job.output_commitment { + println!(" output_hash: 0x{}", hex::encode(hash.0)); + } + if let Some(code) = job.exit_code { + println!(" exit_code: {}", code); + } + if let Some(err) = &job.error { + println!(" error: {}", err); + } + } + None => println!("Job {} not found", args.id), + } + Ok(()) +} + +async fn get_sequence_opt(client: &impl QueryClient, addr: &Address) -> Result> { + let state = client + .actor_state(addr, FvmQueryHeight::default()) + .await + .context("failed to get actor state")?; + match state.value { + Some((_id, state)) => Ok(Some(state.sequence)), + None => Ok(None), + } +} + +fn parse_env_pairs(items: &[String]) -> Result> { + items + .iter() + .map(|item| { + let (k, v) = item + .split_once('=') + .ok_or_else(|| anyhow::anyhow!("invalid --env entry '{}', expected KEY=VALUE", item))?; + Ok((k.to_string(), v.to_string())) + }) + .collect() +} + +fn encode_create_job_calldata( + binary_ref: &str, + input_refs: &[String], + args: &[String], + env: &[(String, String)], + timeout_secs: u64, +) -> Vec { + let mut out = Vec::with_capacity(4 + 256); + out.extend_from_slice(&CREATE_JOB_SELECTOR); + let env_flat: Vec = env.iter().map(|(k, v)| format!("{k}={v}")).collect(); + let encoded = abi_encode(&[ + Token::String(binary_ref.to_string()), + Token::Array(input_refs.iter().cloned().map(Token::String).collect()), + Token::Array(args.iter().cloned().map(Token::String).collect()), + Token::Array(env_flat.into_iter().map(Token::String).collect()), + Token::Uint(EthU256::from(timeout_secs)), + ]); + out.extend_from_slice(&encoded); + out +} + +async fn query_list_jobs(client: &impl QueryClient, params: 
ListJobsParams) -> Result { + let params = RawBytes::serialize(params).context("failed to serialize ListJobs params")?; + let msg = Message { + version: Default::default(), + from: system::SYSTEM_ACTOR_ADDR, + to: BLOBS_ACTOR_ADDR, + sequence: 0, + value: TokenAmount::zero(), + method_num: Method::ListJobs as u64, + params, + gas_limit: 10_000_000_000, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + let response = client + .call(msg, FvmQueryHeight::default()) + .await + .context("failed to execute ListJobs query")?; + if response.value.code.is_err() { + anyhow::bail!("ListJobs query failed: {}", response.value.info); + } + let return_data = fendermint_rpc::response::decode_data(&response.value.data) + .context("failed to decode ListJobs response data")?; + let jobs = fvm_ipld_encoding::from_slice::(&return_data) + .context("failed to decode ListJobs return type")?; + Ok(jobs) +} + +async fn query_get_job( + client: &impl QueryClient, + id: u64, +) -> Result> { + let params = RawBytes::serialize(GetJobParams { id }).context("failed to serialize GetJob params")?; + let msg = Message { + version: Default::default(), + from: system::SYSTEM_ACTOR_ADDR, + to: BLOBS_ACTOR_ADDR, + sequence: 0, + value: TokenAmount::zero(), + method_num: Method::GetJob as u64, + params, + gas_limit: 10_000_000_000, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + let response = client + .call(msg, FvmQueryHeight::default()) + .await + .context("failed to execute GetJob query")?; + if response.value.code.is_err() { + anyhow::bail!("GetJob query failed: {}", response.value.info); + } + let return_data = fendermint_rpc::response::decode_data(&response.value.data) + .context("failed to decode GetJob response data")?; + let job = + fvm_ipld_encoding::from_slice::>( + &return_data, + ) + .context("failed to decode GetJob return type")?; + Ok(job) +} diff --git a/ipc/cli/src/commands/mod.rs b/ipc/cli/src/commands/mod.rs index 
1fd0128a27..64129567f9 100644 --- a/ipc/cli/src/commands/mod.rs +++ b/ipc/cli/src/commands/mod.rs @@ -5,9 +5,12 @@ mod checkpoint; mod config; mod crossmsg; +#[cfg(feature = "ipc-storage")] +mod exec; // mod daemon; mod deploy; mod node; +mod storage; mod subnet; mod ui; mod util; @@ -38,7 +41,10 @@ use std::str::FromStr; use crate::commands::config::ConfigCommandsArgs; use crate::commands::deploy::{DeployCommand, DeployCommandArgs}; +#[cfg(feature = "ipc-storage")] +use crate::commands::exec::ExecCommandsArgs; use crate::commands::node::NodeCommandsArgs; +use crate::commands::storage::StorageCommandsArgs; use crate::commands::validator::ValidatorCommandsArgs; use crate::commands::wallet::WalletCommandsArgs; use crate::CommandLineHandler; @@ -62,6 +68,9 @@ enum Commands { Deploy(DeployCommandArgs), Ui(UICommandArgs), Node(NodeCommandsArgs), + Storage(StorageCommandsArgs), + #[cfg(feature = "ipc-storage")] + Exec(ExecCommandsArgs), } #[derive(Debug, Parser)] @@ -157,6 +166,9 @@ pub async fn cli() -> anyhow::Result<()> { Commands::Deploy(args) => DeployCommand::handle(global, args).await, Commands::Ui(args) => run_ui_command(global.clone(), args.clone()).await, Commands::Node(args) => args.handle(global).await, + Commands::Storage(args) => args.handle(global).await, + #[cfg(feature = "ipc-storage")] + Commands::Exec(args) => args.handle(global).await, }; r.with_context(|| format!("error processing command {:?}", args.command)) diff --git a/ipc/cli/src/commands/node/config.rs b/ipc/cli/src/commands/node/config.rs index a52f3b3721..935d3946f8 100644 --- a/ipc/cli/src/commands/node/config.rs +++ b/ipc/cli/src/commands/node/config.rs @@ -51,15 +51,16 @@ fn serialize_toml_override(value: &Option, serializer: S) -> Res where S: serde::Serializer, { - // use serde::ser::Error; // Not needed for this implementation + use serde::ser::Error; match value { Some(value) => { - let s = value.to_string(); + let s = toml::to_string_pretty(value).map_err(S::Error::custom)?; if 
s.trim().is_empty() { serializer.serialize_none() } else { - serializer.serialize_str(&s) + // Keep the generated YAML as a TOML literal block string. + serializer.serialize_str(s.trim_end()) } } None => serializer.serialize_none(), diff --git a/ipc/cli/src/commands/node/init.rs b/ipc/cli/src/commands/node/init.rs index a2911aa0ae..c02e05cd09 100644 --- a/ipc/cli/src/commands/node/init.rs +++ b/ipc/cli/src/commands/node/init.rs @@ -23,6 +23,8 @@ use fendermint_app::cmd::key::{convert_key_to_cometbft, store_key}; use fendermint_crypto::SecretKey; use ipc_api::subnet_id::SubnetID; use ipc_provider::IpcProvider; +use ipc_wallet::EvmKeyStore; +use rand::thread_rng; use std::path::Path; use crate::commands::subnet::create_genesis::{create_genesis, CreatedGenesis}; @@ -70,6 +72,7 @@ impl CommandLineHandler for InitNode { .context("failed to parse Fendermint configuration overrides")?; let home_paths = ensure_directories(&config.home).await?; + let validator_key_kind = wallet_type_to_account_kind(&config.key.wallet_type)?; let provider = get_ipc_provider(global).context("failed to initialize IPC provider")?; @@ -86,7 +89,14 @@ impl CommandLineHandler for InitNode { init_cometbft(&home_paths.comet_bft, &secret_key, &cometbft_overrides).await?; - init_fendermint(&home_paths.fendermint, &secret_key, &fendermint_overrides).await?; + init_fendermint( + &home_paths.fendermint, + &secret_key, + validator_key_kind, + &fendermint_overrides, + ) + .await?; + ensure_resolver_network_key(&home_paths.fendermint).await?; convert_genesis_to_tendermint(&genesis, &home_paths.comet_bft).await?; @@ -233,6 +243,17 @@ async fn import_and_store_key( ) -> Result { log::info!("Importing validator key"); + if key_config.private_key.is_none() && key_config.path.is_none() { + if key_config.wallet_type.eq_ignore_ascii_case("evm") { + log::info!( + "No validator key provided in node config; falling back to default EVM wallet key" + ); + return load_default_evm_validator_key(provider); + } + + 
bail!("validator key source missing; set key.private-key or key.path in node config"); + } + let imported_wallet = import_wallet(provider, key_config) .context("failed to import wallet - check key format and permissions")?; @@ -245,6 +266,32 @@ async fn import_and_store_key( Ok(secret_key) } +fn load_default_evm_validator_key(provider: &IpcProvider) -> Result { + let keystore = provider + .evm_wallet() + .context("failed to access EVM keystore for default validator key fallback")?; + let mut keystore = keystore.write().unwrap(); + + let default_addr = keystore + .get_default() + .context("failed to read default EVM key from keystore")? + .context("no default EVM key configured in IPC wallet")?; + + let key_info = keystore + .get(&default_addr) + .context("failed to load default EVM key info from keystore")? + .context("default EVM key was not found in keystore")?; + + let secret_key = SecretKey::try_from(key_info.private_key().to_vec()) + .context("default EVM key is not a valid secp256k1 validator key")?; + + log::info!( + "Using default EVM key {} as validator key", + default_addr.to_string() + ); + Ok(secret_key) +} + /// Handle subnet joining if configured async fn handle_subnet_join( global: &GlobalArguments, @@ -358,6 +405,7 @@ async fn init_cometbft( async fn init_fendermint( home: &Path, secret_key: &SecretKey, + validator_key_kind: &'static str, overrides: &Option, ) -> Result<()> { log::info!("Initializing Fendermint"); @@ -416,10 +464,97 @@ async fn init_fendermint( ) })?; + ensure_validator_signing_config(home, validator_key_kind, overrides)?; + log::info!("Fendermint initialized successfully"); Ok(()) } +/// Ensure validator key config exists so legacy topdown vote publishing runs in validator mode. 
+fn ensure_validator_signing_config( + home: &Path, + validator_key_kind: &'static str, + overrides: &Option, +) -> Result<()> { + if overrides + .as_ref() + .and_then(|o| o.extra.get("validator_key")) + .is_some() + { + log::debug!("Skipping validator_key config write because override already sets it"); + return Ok(()); + } + + let config_path = home.join("config/default.toml"); + let validator_key_config = toml::from_str::(&format!( + r#"[validator_key] +path = "validator.sk" +kind = "{validator_key_kind}" +"# + )) + .context("failed to build validator_key override configuration")?; + + merge_toml_config(&config_path, &validator_key_config).with_context(|| { + format!( + "failed to write validator_key config to {}", + config_path.display() + ) + })?; + + Ok(()) +} + +fn wallet_type_to_account_kind(wallet_type: &str) -> Result<&'static str> { + if wallet_type.eq_ignore_ascii_case("evm") { + Ok("ethereum") + } else if wallet_type.eq_ignore_ascii_case("fvm") { + Ok("regular") + } else { + bail!( + "unsupported wallet type '{}' for validator key kind mapping", + wallet_type + ); + } +} + +/// Ensure resolver identity key exists for IPLD resolver service. +/// +/// Legacy topdown mode requires a libp2p key at `fendermint/keys/network.sk`. +/// If it's missing, node start fails even when node init succeeds. +async fn ensure_resolver_network_key(fendermint_home: &Path) -> Result<()> { + let keys_dir = fendermint_home.join("keys"); + let network_sk = keys_dir.join("network.sk"); + + if tokio::fs::try_exists(&network_sk).await? 
{ + log::debug!( + "Resolver network key already exists at {}", + network_sk.display() + ); + return Ok(()); + } + + tokio::fs::create_dir_all(&keys_dir) + .await + .with_context(|| { + format!( + "failed to create Fendermint resolver keys directory at {}", + keys_dir.display() + ) + })?; + + let mut rng = thread_rng(); + let network_key = SecretKey::random(&mut rng); + store_key(&network_key, "network", &keys_dir).with_context(|| { + format!( + "failed to generate resolver network key files in {}", + keys_dir.display() + ) + })?; + + log::info!("Generated resolver network key at {}", network_sk.display()); + Ok(()) +} + /// Convert genesis to Tendermint format async fn convert_genesis_to_tendermint( genesis: &CreatedGenesis, diff --git a/ipc/cli/src/commands/storage/client/bucket.rs b/ipc/cli/src/commands/storage/client/bucket.rs new file mode 100644 index 0000000000..acf5b71ec6 --- /dev/null +++ b/ipc/cli/src/commands/storage/client/bucket.rs @@ -0,0 +1,357 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +//! Bucket subcommand for creating and managing storage buckets on-chain. 
+ +use anyhow::{anyhow, Context, Result}; +use async_trait::async_trait; +use clap::{Args, Subcommand}; +use ethers::abi::{encode as abi_encode, Token}; +use num_traits::Zero; +use std::collections::HashMap; +use std::path::PathBuf; + +use fendermint_rpc::client::FendermintClient; +use fendermint_rpc::message::SignedMessageFactory; +use fendermint_rpc::tx::{TxClient, TxCommit}; +use fendermint_rpc::QueryClient; +use fendermint_vm_actor_interface::adm::{self, Kind, ListMetadataParams, Method as AdmMethod}; +use fendermint_vm_actor_interface::eam::EthAddress; +use fendermint_vm_actor_interface::evm; +use fendermint_vm_message::query::FvmQueryHeight; +use fvm_ipld_encoding::{BytesSer, RawBytes}; +use fvm_shared::address::Address; +use fvm_shared::chainid::ChainID; +use fvm_shared::econ::TokenAmount; + +use crate::commands::storage::client_context::{ + resolve_default_owner_from_client_config, resolve_rpc_url, resolve_write_context, +}; +use crate::{CommandLineHandler, GlobalArguments}; + +#[derive(Debug, Args)] +#[command(name = "bucket", about = "Create and manage storage buckets")] +pub struct BucketCommandArgs { + #[command(subcommand)] + command: BucketCommands, +} + +#[derive(Debug, Subcommand)] +pub enum BucketCommands { + /// Create a new storage bucket + Create(CreateBucketArgs), + /// List buckets owned by an address + List(ListBucketsArgs), +} + +impl BucketCommandArgs { + pub async fn handle(&self, global: &GlobalArguments) -> anyhow::Result<()> { + match &self.command { + BucketCommands::Create(args) => CreateBucket::handle(global, args).await, + BucketCommands::List(args) => ListBuckets::handle(global, args).await, + } + } +} + +// --------------------------------------------------------------------------- +// Create +// --------------------------------------------------------------------------- + +#[derive(Debug, Args)] +pub struct CreateBucketArgs { + /// Storage config file + #[arg(long)] + pub config: Option, + + /// Optional owner address (defaults 
to the operator key address) + #[arg(long)] + pub owner: Option, + + /// Optional metadata key=value pairs (can be repeated) + #[arg(long = "metadata", value_name = "KEY=VALUE")] + pub metadata: Vec, +} + +pub struct CreateBucket; + +#[async_trait] +impl CommandLineHandler for CreateBucket { + type Arguments = CreateBucketArgs; + + async fn handle(global: &GlobalArguments, args: &Self::Arguments) -> Result<()> { + let write_ctx = resolve_write_context(global, args.config.clone())?; + let rpc_url = write_ctx.rpc_url; + let secret_key = write_ctx.secret_key; + let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?; + + let chain_id = crate::commands::storage::bucket::query_chain_id(&fm_client) + .await + .context("Failed to query chain ID")?; + + let pub_key = secret_key.public_key(); + let sender_eth = EthAddress::new_secp256k1(&pub_key.serialize()) + .context("failed to derive delegated sender address from signer key")?; + let addr = Address::new_delegated(10, &sender_eth.0) + .context("failed to construct delegated sender address")?; + + // Parse owner address — ADM requires delegated (f410) address + let owner = if let Some(ref owner_str) = args.owner { + crate::require_fil_addr_from_str(owner_str)? + } else { + let eth_addr = EthAddress::new_secp256k1(&pub_key.serialize()) + .context("failed to derive delegated address")?; + Address::new_delegated(10, &eth_addr.0).context("failed to construct f410 address")? + }; + + let state = fm_client + .actor_state(&addr, FvmQueryHeight::default()) + .await + .context("Failed to get actor state")?; + let sequence = state.value.map(|(_, s)| s.sequence).ok_or_else(|| { + anyhow!( + "sender actor {} does not exist on-chain at {}. 
Fund/initialize this delegated \ + address first.", + addr, + rpc_url + ) + })?; + + let mf = SignedMessageFactory::new(secret_key, addr, sequence, ChainID::from(chain_id)); + let mut bound_client = fm_client.bind(mf); + + // Parse metadata + let metadata = parse_metadata(&args.metadata)?; + + let owner_eth = ipc_api::evm::payload_to_evm_address(owner.payload()) + .context("bucket owner must be an EVM/delegated address")?; + let (calldata, invoke_params) = if metadata.is_empty() { + let mut calldata = Vec::with_capacity(4 + 32); + // createBucket(address) + calldata.extend_from_slice(&[0xf6, 0xd6, 0xc4, 0x20]); + calldata.extend_from_slice(&abi_encode(&[Token::Address(owner_eth)])); + let invoke_params = RawBytes::serialize(BytesSer(&calldata)) + .context("Failed to serialize FEVM calldata for createBucket(address)")?; + (calldata, invoke_params) + } else { + let metadata_tokens: Vec = metadata + .iter() + .map(|(k, v)| Token::Tuple(vec![Token::String(k.clone()), Token::String(v.clone())])) + .collect(); + let mut calldata = Vec::with_capacity(4 + 128); + // createBucket(address,(string,string)[]) + calldata.extend_from_slice(&[0xe1, 0x29, 0xed, 0x90]); + calldata.extend_from_slice(&abi_encode(&[ + Token::Address(owner_eth), + Token::Array(metadata_tokens), + ])); + let invoke_params = RawBytes::serialize(BytesSer(&calldata)) + .context("Failed to serialize FEVM calldata for createBucket(address,metadata)")?; + (calldata, invoke_params) + }; + let gas_params = crate::commands::storage::bucket::tx_gas_params( + &bound_client, + addr, + adm::ADM_ACTOR_ADDR, + evm::Method::InvokeContract as u64, + invoke_params, + TokenAmount::zero(), + ) + .await + .context("Failed to estimate CreateExternal gas parameters")?; + + println!("Creating bucket..."); + + let res = TxClient::::fevm_invoke( + &mut bound_client, + adm::ADM_ACTOR_ADDR, + calldata.into(), + TokenAmount::zero(), + gas_params, + ) + .await + .map_err(|e| { + anyhow!( + "Failed to send CreateExternal transaction: 
{} (sender={} owner={} rpc={})", + e, + addr, + owner, + rpc_url + ) + })?; + + if res.response.check_tx.code.is_err() { + let log = &res.response.check_tx.log; + let info = &res.response.check_tx.info; + return Err(anyhow!( + "CreateExternal check_tx failed (code {:?}): log={} info={} sender={} owner={} rpc={}", + res.response.check_tx.code, + if log.is_empty() { "" } else { log }, + if info.is_empty() { "" } else { info }, + addr, + owner, + rpc_url + )); + } + + if res.response.deliver_tx.code.is_err() { + let log = &res.response.deliver_tx.log; + let info = &res.response.deliver_tx.info; + return Err(anyhow!( + "CreateExternal deliver_tx failed (code {:?}): log={} info={} sender={} owner={} rpc={}", + res.response.deliver_tx.code, + if log.is_empty() { "" } else { log }, + if info.is_empty() { "" } else { info }, + addr, + owner, + rpc_url + )); + } + + println!("Bucket created successfully!"); + println!(" Owner: {}", owner); + println!(" Tx hash: {}", res.response.hash); + println!("Run `ipc-cli storage client bucket list` to see the new bucket address."); + + Ok(()) + } +} + +// --------------------------------------------------------------------------- +// List +// --------------------------------------------------------------------------- + +#[derive(Debug, Args)] +pub struct ListBucketsArgs { + /// Storage config file + #[arg(long)] + pub config: Option, + + /// Owner address to list buckets for (defaults to operator key address) + #[arg(long)] + pub owner: Option, + + /// Output in JSON format + #[arg(long)] + pub json: bool, +} + +pub struct ListBuckets; + +#[async_trait] +impl CommandLineHandler for ListBuckets { + type Arguments = ListBucketsArgs; + + async fn handle(_global: &GlobalArguments, args: &Self::Arguments) -> Result<()> { + let rpc_url = resolve_rpc_url(args.config.clone())?; + let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?; + + // Determine the owner address + let owner = if let Some(ref owner_str) = args.owner { + 
crate::require_fil_addr_from_str(owner_str)? + } else if let Some(owner_from_cfg) = resolve_default_owner_from_client_config(args.config.clone())? { + owner_from_cfg + } else { + return Err(anyhow!( + "No default owner configured. Pass --owner, or set `address` in storage client config." + )); + }; + + let params = ListMetadataParams { owner }; + let params_bytes = + RawBytes::serialize(params).context("Failed to serialize ListMetadataParams")?; + + let msg = fvm_shared::message::Message { + version: Default::default(), + from: fendermint_vm_actor_interface::system::SYSTEM_ACTOR_ADDR, + to: adm::ADM_ACTOR_ADDR, + sequence: 0, + value: TokenAmount::zero(), + method_num: AdmMethod::ListMetadata as u64, + params: params_bytes, + gas_limit: 10_000_000_000, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + + let response = fm_client + .call(msg, FvmQueryHeight::default()) + .await + .with_context(|| { + format!( + "Failed to query ListMetadata (owner={} rpc={})", + owner, rpc_url + ) + })?; + + if response.value.code.is_err() { + return Err(anyhow!( + "ListMetadata query failed: {} (owner={} rpc={})", + response.value.info, + owner, + rpc_url + )); + } + + let return_data = fendermint_rpc::response::decode_data(&response.value.data) + .context("Failed to decode response data")?; + + let results: Vec = fvm_ipld_encoding::from_slice(&return_data) + .context("Failed to decode ListMetadata response")?; + + if args.json { + let json_items: Vec = results + .iter() + .map(|m| { + serde_json::json!({ + "kind": format!("{}", m.kind), + "address": m.address.to_string(), + "metadata": m.metadata, + }) + }) + .collect(); + println!("{}", serde_json::to_string_pretty(&json_items)?); + } else { + let buckets: Vec<&adm::Metadata> = results + .iter() + .filter(|m| matches!(m.kind, Kind::Bucket)) + .collect(); + + if buckets.is_empty() { + println!("No buckets found for {}", owner); + } else { + println!("{:<50} METADATA", "ADDRESS"); + println!("{}", 
"-".repeat(80)); + for m in &buckets { + let meta_str = if m.metadata.is_empty() { + String::from("-") + } else { + m.metadata + .iter() + .map(|(k, v)| format!("{}={}", k, v)) + .collect::>() + .join(", ") + }; + println!("{:<50} {}", m.address, meta_str); + } + println!("\nTotal: {} buckets", buckets.len()); + } + } + + Ok(()) + } +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +fn parse_metadata(pairs: &[String]) -> Result> { + let mut map = HashMap::new(); + for pair in pairs { + let (key, value) = pair + .split_once('=') + .ok_or_else(|| anyhow!("Invalid metadata format '{}', expected KEY=VALUE", pair))?; + map.insert(key.to_string(), value.to_string()); + } + Ok(map) +} diff --git a/ipc/cli/src/commands/storage/client/cat.rs b/ipc/cli/src/commands/storage/client/cat.rs new file mode 100644 index 0000000000..897dcb9ec8 --- /dev/null +++ b/ipc/cli/src/commands/storage/client/cat.rs @@ -0,0 +1,75 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +//! 
Cat command for displaying file contents from storage + +use anyhow::{anyhow, Result}; +use clap::Args; +use std::io::{self, Write}; +use std::path::PathBuf; + +use async_trait::async_trait; +use fendermint_rpc::client::FendermintClient; + +use crate::commands::storage::{ + client::download_object_data, + client_context::resolve_rpc_url, + config::resolve_client_gateway_url, + gateway::GatewayClient, + path, +}; +use crate::{CommandLineHandler, GlobalArguments}; + +#[derive(Debug, Args)] +pub struct CatArgs { + /// Storage path (ipc://bucket_address/path/to/file) + #[arg(value_name = "PATH")] + pub path: String, + + /// Gateway URL (overrides config and env var) + #[arg(long)] + pub gateway: Option, + + /// Storage config file + #[arg(long)] + pub config: Option, +} + +pub struct CatStorage; + +#[async_trait] +impl CommandLineHandler for CatStorage { + type Arguments = CatArgs; + + async fn handle(_global: &GlobalArguments, args: &Self::Arguments) -> Result<()> { + let storage_path = path::StoragePath::parse(&args.path)?; + + if storage_path.is_bucket_root() { + return Err(anyhow!( + "Path must include a file key, not just a bucket address" + )); + } + + let gateway_url = + resolve_client_gateway_url(args.gateway.as_deref(), args.config.clone(), false)?; + let rpc_url = resolve_rpc_url(args.config.clone())?; + + let gateway = GatewayClient::new(gateway_url.clone())?; + let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?; + + let data = download_object_data( + &gateway, + &gateway_url, + &fm_client, + &rpc_url, + storage_path.bucket_address, + &storage_path.key, + ) + .await?; + + io::stdout().write_all(&data)?; + io::stdout().flush()?; + + Ok(()) + } +} diff --git a/ipc/cli/src/commands/storage/client/cp.rs b/ipc/cli/src/commands/storage/client/cp.rs new file mode 100644 index 0000000000..48b2a779d5 --- /dev/null +++ b/ipc/cli/src/commands/storage/client/cp.rs @@ -0,0 +1,539 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + 
+//! Copy command for storage operations +//! +//! Supports three modes: +//! - local -> ipc:// : Upload to storage +//! - ipc:// -> local : Download from storage +//! - ipc:// -> ipc:// : Copy between buckets + +use anyhow::{anyhow, Context, Result}; +use clap::Args; +use fs_err as fs; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; + +use async_trait::async_trait; +use fendermint_rpc::client::FendermintClient; +use fendermint_rpc::message::SignedMessageFactory; +use fendermint_rpc::QueryClient; +use fendermint_vm_actor_interface::eam::EthAddress; +use fvm_shared::address::Address; +use fvm_shared::chainid::ChainID; + +use crate::commands::storage::{ + bucket, + client::download_object_data, + client_context::{resolve_rpc_url, resolve_write_context}, + config::resolve_client_gateway_url, + gateway::GatewayClient, + path, +}; +use crate::{CommandLineHandler, GlobalArguments}; + +#[derive(Debug, Args)] +pub struct CopyArgs { + /// Source path (local or ipc://) + #[arg(value_name = "SOURCE")] + pub source: String, + + /// Destination path (local or ipc://) + #[arg(value_name = "DEST")] + pub dest: String, + + /// Gateway URL (overrides config and env var) + #[arg(long)] + pub gateway: Option, + + /// Storage config file + #[arg(long)] + pub config: Option, + + /// Recursive copy (for directories) + #[arg(short, long)] + pub recursive: bool, + + /// Overwrite existing objects + #[arg(long)] + pub overwrite: bool, +} + +pub struct CopyStorage; + +#[async_trait] +impl CommandLineHandler for CopyStorage { + type Arguments = CopyArgs; + + async fn handle(global: &GlobalArguments, args: &Self::Arguments) -> Result<()> { + let source_is_storage = path::is_storage_path(&args.source); + let dest_is_storage = path::is_storage_path(&args.dest); + + match (source_is_storage, dest_is_storage) { + (false, true) => { + // Local -> Storage (upload) + upload_to_storage(global, args).await + } + (true, false) => { + // Storage -> Local (download) + 
download_from_storage(global, args).await + } + (true, true) => { + // Storage -> Storage (copy between buckets) + copy_between_buckets(global, args).await + } + (false, false) => Err(anyhow!( + "At least one path must be a storage path (ipc://...)" + )), + } + } +} + +/// Upload a local file to storage +async fn upload_to_storage(global: &GlobalArguments, args: &CopyArgs) -> Result<()> { + let local_path = Path::new(&args.source); + let storage_path = path::StoragePath::parse(&args.dest)?; + + if storage_path.is_bucket_root() { + return Err(anyhow!( + "Destination must include a key/path, not just bucket address" + )); + } + + // Handle recursive directory upload + if local_path.is_dir() { + if !args.recursive { + return Err(anyhow!("Cannot copy directory without -r/--recursive flag")); + } + return upload_directory(global, local_path, &storage_path, args).await; + } + + // Upload single file + upload_file(global, local_path, &storage_path, args).await +} + +/// Upload a single file to storage +async fn upload_file( + global: &GlobalArguments, + local_path: &Path, + storage_path: &path::StoragePath, + args: &CopyArgs, +) -> Result<()> { + println!( + "Uploading {} -> {}", + local_path.display(), + storage_path.to_uri() + ); + + // Read file data + let data = fs::read(local_path) + .with_context(|| format!("Failed to read file: {}", local_path.display()))?; + + let write_ctx = resolve_write_context(global, args.config.clone())?; + let rpc_url = write_ctx.rpc_url; + let secret_key = write_ctx.secret_key; + + // Get gateway URL + let gateway_url = + resolve_client_gateway_url(args.gateway.as_deref(), args.config.clone(), true)?; + + // Upload to gateway + let gateway_client = GatewayClient::new(gateway_url.clone())?; + let upload_response = gateway_client + .upload_blob(data) + .await + .with_context(|| { + format!( + "Failed to upload blob to gateway (file={} gateway={} bucket={} key={})", + local_path.display(), + gateway_url, + storage_path.bucket_address, + 
storage_path.key + ) + })?; + + println!( + "Uploaded blob: {} ({} bytes, {} chunks)", + upload_response.hash, upload_response.original_len, upload_response.num_chunks + ); + + // Convert hash to B256 (auto-detects hex or base32) + let blob_hash = + bucket::hash_to_b256(&upload_response.hash).context("Invalid blob hash from gateway")?; + + // Get node info for source ID (auto-detects hex or base32) + let node_info = gateway_client.get_node_info().await.with_context(|| { + format!( + "Failed to fetch gateway node info from {} (needed for source node id)", + gateway_url + ) + })?; + let source = + bucket::hash_to_b256(&node_info.node_id).context("Invalid node ID from gateway")?; + + // Register object on-chain + println!("Registering object on-chain..."); + + let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?; + + let chain_id = bucket::query_chain_id(&fm_client) + .await + .with_context(|| format!("Failed to query chain ID from rpc={}", rpc_url))?; + + let pub_key = secret_key.public_key(); + let eth_addr = EthAddress::new_secp256k1(&pub_key.serialize()) + .context("failed to derive delegated address")?; + let addr = + Address::new_delegated(10, &eth_addr.0).context("failed to construct f410 address")?; + let state = fm_client + .actor_state( + &addr, + fendermint_vm_message::query::FvmQueryHeight::default(), + ) + .await + .with_context(|| format!("Failed to get actor state for sender {} via rpc={}", addr, rpc_url))?; + let sequence = state.value.map(|(_, s)| s.sequence).ok_or_else(|| { + anyhow!( + "sender actor {} does not exist on-chain at {}. Fund/initialize this delegated \ + address first.", + addr, + rpc_url + ) + })?; + + // When --overwrite is set, remove the existing key before addObject (the EVM + // addObject selector does not accept an overwrite flag). 
+ let key_exists = if args.overwrite { + let listed = bucket::list_objects( + &fm_client, + storage_path.bucket_address, + Some(storage_path.key.clone()), + None, + None, + 2, + ) + .await + .context("Failed to check if destination key exists")?; + listed + .objects + .iter() + .any(|(k, _)| k.as_slice() == storage_path.key.as_bytes()) + } else { + false + }; + + let mf = SignedMessageFactory::new(secret_key, addr, sequence, ChainID::from(chain_id)); + let mut bound_client = fm_client.bind(mf); + + let mut deleted_old = false; + if key_exists { + match bucket::delete_object( + &mut bound_client, + storage_path.bucket_address, + storage_path.key.clone(), + ) + .await + { + Ok(()) => { + deleted_old = true; + } + Err(e) => { + eprintln!( + "⚠ Could not delete existing key before overwrite ({}); will attempt add anyway", + e + ); + } + } + } + + let add_result = bucket::add_object( + &mut bound_client, + storage_path.bucket_address, + source, + storage_path.key.clone(), + blob_hash, + blob_hash, // recovery_hash (same as blob hash for now) + upload_response.original_len as u64, + HashMap::new(), // metadata + upload_response.data_shards as u16, + upload_response.parity_shards as u16, + ) + .await; + + match add_result { + Ok(()) => {} + Err(e) if key_exists && !deleted_old => { + let msg = e.to_string(); + if msg.contains("key exists") { + println!( + "⚠ Object already registered on-chain with same key (blob pending finalization). \ + Skipping re-registration." 
+ ); + } else { + return Err(e).context("Failed to register object on-chain"); + } + } + Err(e) => { + return Err(e).context("Failed to register object on-chain"); + } + } + + println!( + "āœ“ Successfully uploaded and registered: {}", + storage_path.key + ); + + Ok(()) +} + +/// Upload a directory recursively +async fn upload_directory( + global: &GlobalArguments, + local_dir: &Path, + storage_base: &path::StoragePath, + args: &CopyArgs, +) -> Result<()> { + println!("Uploading directory {} recursively...", local_dir.display()); + + // Walk directory and upload each file + for entry in walkdir::WalkDir::new(local_dir) + .follow_links(false) + .into_iter() + .filter_map(|e| e.ok()) + { + if entry.file_type().is_file() { + let rel_path = entry.path().strip_prefix(local_dir)?; + let rel_path_str = rel_path.to_string_lossy().to_string(); + + // Construct storage key + let storage_key = if storage_base.key.is_empty() { + rel_path_str + } else { + format!( + "{}/{}", + storage_base.key.trim_end_matches('/'), + rel_path_str + ) + }; + + let file_storage_path = path::StoragePath { + bucket_address: storage_base.bucket_address, + key: storage_key, + }; + + // Upload file + upload_file(global, entry.path(), &file_storage_path, args).await?; + } + } + + println!("āœ“ Directory upload complete"); + Ok(()) +} + +/// Download a file or directory from storage to local. 
+/// +/// A path is treated as a directory prefix when: +/// - the key is empty (bucket root), OR +/// - the key ends with `/` (explicit directory marker), OR +/// - `-r`/`--recursive` is given (user intends to download everything under the prefix) +async fn download_from_storage(global: &GlobalArguments, args: &CopyArgs) -> Result<()> { + let storage_path = path::StoragePath::parse(&args.source)?; + let local_path = Path::new(&args.dest); + + let is_dir_path = + storage_path.is_bucket_root() || storage_path.key.ends_with('/') || args.recursive; + + if is_dir_path { + if !args.recursive { + return Err(anyhow!( + "Source looks like a directory prefix. Use -r/--recursive to download it." + )); + } + return download_directory(global, &storage_path, local_path, args).await; + } + + download_file(&storage_path, local_path, args).await +} + +/// Download a single file from storage +async fn download_file( + storage_path: &path::StoragePath, + local_path: &Path, + args: &CopyArgs, +) -> Result<()> { + println!( + "Downloading {} -> {}", + storage_path.to_uri(), + local_path.display() + ); + + let gateway_url = + resolve_client_gateway_url(args.gateway.as_deref(), args.config.clone(), true)?; + let rpc_url = resolve_rpc_url(args.config.clone())?; + + let gateway = GatewayClient::new(gateway_url.clone())?; + let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?; + + let data = download_object_data( + &gateway, + &gateway_url, + &fm_client, + &rpc_url, + storage_path.bucket_address, + &storage_path.key, + ) + .await?; + + // Create parent directories if needed + if let Some(parent) = local_path.parent() { + fs::create_dir_all(parent)?; + } + + // Write to file + fs::write(local_path, data)?; + + println!("āœ“ Downloaded {} bytes", fs::metadata(local_path)?.len()); + + Ok(()) +} + +/// Download a directory recursively (list objects with prefix) +async fn download_directory( + _global: &GlobalArguments, + storage_base: &path::StoragePath, + local_dir: &Path, + 
args: &CopyArgs, +) -> Result<()> { + println!( + "Downloading directory {} recursively...", + storage_base.to_uri() + ); + + let rpc_url = resolve_rpc_url(args.config.clone())?; + + let gateway_url = + resolve_client_gateway_url(args.gateway.as_deref(), args.config.clone(), true)?; + + let gateway_client = GatewayClient::new(gateway_url.clone())?; + + // List all objects with the prefix + let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?; + + let prefix = if storage_base.is_bucket_root() { + None + } else { + Some(storage_base.key.clone()) + }; + + let mut start_key = None; + let mut downloaded_count = 0; + + loop { + let list_result = bucket::list_objects( + &fm_client, + storage_base.bucket_address, + prefix.clone(), + None, // no delimiter for recursive listing + start_key, + 100, + ) + .await + .with_context(|| { + format!( + "Failed to list objects for recursive download (bucket={} prefix={} rpc={})", + storage_base.bucket_address, + prefix.as_deref().unwrap_or(""), + rpc_url + ) + })?; + + if list_result.objects.is_empty() { + break; + } + + for (key_bytes, _) in &list_result.objects { + let key_str = String::from_utf8_lossy(key_bytes).to_string(); + + // Compute relative path by stripping the prefix + let rel_path = if let Some(ref pfx) = prefix { + key_str.strip_prefix(pfx).unwrap_or(&key_str) + } else { + &key_str + }; + let rel_path = rel_path.trim_start_matches('/'); + + let dest_file = local_dir.join(rel_path); + + // Create parent directories + if let Some(parent) = dest_file.parent() { + fs::create_dir_all(parent)?; + } + + // Download the object (with gateway-by-key primary + blob-by-hash fallback) + println!("Downloading {} -> {}", key_str, dest_file.display()); + let data = download_object_data( + &gateway_client, + &gateway_url, + &fm_client, + &rpc_url, + storage_base.bucket_address, + &key_str, + ) + .await + .with_context(|| format!("Failed to download object: {}", key_str))?; + + fs::write(&dest_file, data)?; + 
downloaded_count += 1; + } + + if list_result.next_key.is_none() { + break; + } + + start_key = list_result + .next_key + .map(|k| String::from_utf8_lossy(k.as_slice()).to_string()); + } + + println!("āœ“ Downloaded {} files", downloaded_count); + Ok(()) +} + +/// Copy an object between storage buckets +async fn copy_between_buckets(_global: &GlobalArguments, args: &CopyArgs) -> Result<()> { + let source_path = path::StoragePath::parse(&args.source)?; + let dest_path = path::StoragePath::parse(&args.dest)?; + + println!("Copying {} -> {}", source_path.to_uri(), dest_path.to_uri()); + + // Download from source + let temp_dir = tempfile::tempdir()?; + let temp_file = temp_dir.path().join("temp_copy"); + + let download_args = CopyArgs { + source: args.source.clone(), + dest: temp_file.to_string_lossy().to_string(), + gateway: args.gateway.clone(), + config: args.config.clone(), + recursive: false, + overwrite: args.overwrite, + }; + + download_file(&source_path, &temp_file, &download_args).await?; + + // Upload to destination + let upload_args = CopyArgs { + source: temp_file.to_string_lossy().to_string(), + dest: args.dest.clone(), + gateway: args.gateway.clone(), + config: args.config.clone(), + recursive: false, + overwrite: args.overwrite, + }; + + upload_file(_global, &temp_file, &dest_path, &upload_args).await?; + + println!("āœ“ Copy complete"); + + Ok(()) +} diff --git a/ipc/cli/src/commands/storage/client/credit.rs b/ipc/cli/src/commands/storage/client/credit.rs new file mode 100644 index 0000000000..6f03226a61 --- /dev/null +++ b/ipc/cli/src/commands/storage/client/credit.rs @@ -0,0 +1,443 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +//! Credit subcommand for buying and querying storage credits. 
+ +use anyhow::{anyhow, Context, Result}; +use async_trait::async_trait; +use clap::{Args, Subcommand}; +use ipc_api::evm::payload_to_evm_address; +use ipc_wallet::EvmKeyStore; +use num_traits::Zero; +use std::path::PathBuf; +use std::time::Duration; + +use fendermint_actor_blobs_shared::{ + accounts::Account, method::Method as BlobsMethod, BLOBS_ACTOR_ADDR, +}; +use ethers::abi::{encode as abi_encode, Token}; +use fendermint_rpc::client::FendermintClient; +use fendermint_rpc::message::SignedMessageFactory; +use fendermint_rpc::tx::{TxClient, TxCommit}; +use fendermint_rpc::QueryClient; +use fendermint_vm_actor_interface::eam::EthAddress; +use fendermint_vm_actor_interface::evm; +use fendermint_vm_message::query::FvmQueryHeight; +use fvm_ipld_encoding::{BytesSer, RawBytes}; +use fvm_shared::address::Address; +use fvm_shared::chainid::ChainID; +use fvm_shared::econ::TokenAmount; + +use crate::commands::storage::bucket; +use crate::commands::storage::client_context::resolve_write_context; +use crate::commands::storage::config::{ + resolve_client_config_path, resolve_provider_config_path, StorageClientConfig, StorageConfig, +}; +use crate::{CommandLineHandler, GlobalArguments}; + +#[derive(Debug, Args)] +#[command(name = "credit", about = "Buy and query storage credits")] +pub struct CreditCommandArgs { + #[command(subcommand)] + command: CreditCommands, +} + +#[derive(Debug, Subcommand)] +pub enum CreditCommands { + /// Buy storage credits by sending tokens to the blobs actor + Buy(BuyCreditArgs), + /// Get account credit information + Info(CreditInfoArgs), +} + +impl CreditCommandArgs { + pub async fn handle(&self, global: &GlobalArguments) -> anyhow::Result<()> { + match &self.command { + CreditCommands::Buy(args) => BuyCredit::handle(global, args).await, + CreditCommands::Info(args) => CreditInfo::handle(global, args).await, + } + } +} + +// --------------------------------------------------------------------------- +// Buy +// 
---------------------------------------------------------------------------
+
+#[derive(Debug, Args)]
+pub struct BuyCreditArgs {
+    /// Amount of tokens to spend (in FIL/ether units, e.g. 0.1)
+    #[arg(value_name = "AMOUNT")]
+    pub amount: f64,
+
+    /// Recipient address (defaults to the operator key address)
+    #[arg(long)]
+    pub to: Option<String>,
+
+    /// Storage client/provider config file
+    #[arg(long)]
+    pub config: Option<PathBuf>,
+}
+
+pub struct BuyCredit;
+
+#[async_trait]
+impl CommandLineHandler for BuyCredit {
+    type Arguments = BuyCreditArgs;
+
+    async fn handle(global: &GlobalArguments, args: &Self::Arguments) -> Result<()> {
+        if args.amount <= 0.0 {
+            return Err(anyhow!("Amount must be positive"));
+        }
+
+        let write_ctx = resolve_write_context(global, args.config.clone())?;
+        let rpc_url = write_ctx.rpc_url;
+        let secret_key = write_ctx.secret_key;
+
+        let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?;
+
+        let chain_id = bucket::query_chain_id(&fm_client)
+            .await
+            .context("Failed to query chain ID")?;
+
+        let pub_key = secret_key.public_key();
+        // Use delegated sender address for storage txs.
+        let sender_eth = EthAddress::new_secp256k1(&pub_key.serialize())
+            .context("failed to derive delegated sender address from signer key")?;
+        let addr = Address::new_delegated(10, &sender_eth.0)
+            .context("failed to construct delegated sender address")?;
+
+        // Determine recipient — blobs actor requires a delegated (f410) address
+        let recipient = if let Some(ref to_str) = args.to {
+            crate::require_fil_addr_from_str(to_str)?
+        } else {
+            let eth_addr = EthAddress::new_secp256k1(&pub_key.serialize())
+                .context("failed to derive delegated address from operator key")?;
+            Address::new_delegated(10, &eth_addr.0).context("failed to construct f410 address")?
+        };
+
+        let state = fm_client
+            .actor_state(&addr, FvmQueryHeight::default())
+            .await
+            .context("Failed to get actor state")?;
+        let sequence = state.value.map(|(_, s)| s.sequence).ok_or_else(|| {
+            anyhow!(
+                "sender actor {} does not exist on-chain at {}. Fund/initialize this delegated \
+                 address first (or use a signer key whose delegated address exists on this subnet).",
+                addr,
+                rpc_url
+            )
+        })?;
+
+        let mf = SignedMessageFactory::new(secret_key, addr, sequence, ChainID::from(chain_id));
+        let mut bound_client = fm_client.bind(mf);
+
+        // The blobs actor is an EVM actor; invoke through InvokeContract with ABI calldata.
+        let recipient_eth = payload_to_evm_address(recipient.payload())
+            .context("BuyCredit recipient must be an EVM/delegated address")?;
+        let mut calldata = Vec::with_capacity(4 + 32);
+        // buyCredit(address) selector from credit facade ABI.
+        calldata.extend_from_slice(&[0xa3, 0x8e, 0xae, 0x9f]);
+        calldata.extend_from_slice(&abi_encode(&[Token::Address(recipient_eth)]));
+        let invoke_params = RawBytes::serialize(BytesSer(&calldata))
+            .context("Failed to serialize FEVM calldata for BuyCredit")?;
+
+        // Convert amount to TokenAmount (nano precision)
+        let value = crate::f64_to_token_amount(args.amount)?;
+        let gas_params = bucket::tx_gas_params(
+            &bound_client,
+            addr,
+            BLOBS_ACTOR_ADDR,
+            evm::Method::InvokeContract as u64,
+            invoke_params,
+            value.clone(),
+        )
+        .await
+        .context("Failed to estimate BuyCredit gas parameters")?;
+
+        println!(
+            "Buying credit for {} with {} FIL...",
+            recipient, args.amount
+        );
+
+        let res = TxClient::<TxCommit>::fevm_invoke(
+            &mut bound_client,
+            BLOBS_ACTOR_ADDR,
+            calldata.into(),
+            value,
+            gas_params,
+        )
+        .await
+        .map_err(|e| {
+            anyhow!(
+                "Failed to send BuyCredit transaction: {} (sender={} recipient={} rpc={})",
+                e,
+                addr,
+                recipient,
+                rpc_url
+            )
+        })?;
+
+        if res.response.check_tx.code.is_err() {
+            let log = &res.response.check_tx.log;
+            let info = &res.response.check_tx.info;
+            return Err(anyhow!(
+                "BuyCredit check_tx failed (code {:?}): log={} info={} sender={} recipient={} rpc={}",
+                res.response.check_tx.code,
+                if log.is_empty() { "<empty>" } else { log },
+                if info.is_empty() { "<empty>" } else { info },
+                addr,
+                recipient,
+                rpc_url,
+            ));
+        }
+
+        if res.response.deliver_tx.code.is_err() {
+            let log = &res.response.deliver_tx.log;
+            let info = &res.response.deliver_tx.info;
+            return Err(anyhow!(
+                "BuyCredit deliver_tx failed (code {:?}): log={} info={} sender={} recipient={} rpc={}",
+                res.response.deliver_tx.code,
+                if log.is_empty() { "<empty>" } else { log },
+                if info.is_empty() { "<empty>" } else { info },
+                addr,
+                recipient,
+                rpc_url
+            ));
+        }
+
+        println!("Credit purchased successfully for {}", recipient);
+
+        Ok(())
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Info
+// ---------------------------------------------------------------------------
+
+#[derive(Debug, Args)]
+pub struct CreditInfoArgs {
+    /// Account address to query (defaults to client-config/provider key when available)
+    #[arg(long)]
+    pub address: Option<String>,
+
+    /// Storage client/provider config file
+    #[arg(long)]
+    pub config: Option<PathBuf>,
+
+    /// Tendermint RPC URL (used when no storage config exists; overrides config when provided)
+    #[arg(long)]
+    pub rpc_url: Option<String>,
+
+    /// Output in JSON format
+    #[arg(long)]
+    pub json: bool,
+}
+
+pub struct CreditInfo;
+
+#[async_trait]
+impl CommandLineHandler for CreditInfo {
+    type Arguments = CreditInfoArgs;
+
+    async fn handle(global: &GlobalArguments, args: &Self::Arguments) -> Result<()> {
+        let client_config_path = resolve_client_config_path(args.config.clone());
+        let provider_config_path = resolve_provider_config_path(args.config.clone());
+        let mut default_query_addr = None;
+
+        let (rpc_url, rpc_source) = if client_config_path.exists() {
+            let client_cfg = StorageClientConfig::load(&client_config_path).with_context(|| {
+                format!(
+                    "failed to load client storage config at {}",
+                    client_config_path.display()
+                )
+            })?;
+            if args.address.is_none() {
+                if let Some(addr) = client_cfg.address {
+                    default_query_addr = Some(crate::require_fil_addr_from_str(&addr)?);
+                }
+            }
+            if let Some(url) = args.rpc_url.clone() {
+                (url, "--rpc-url".to_string())
+            } else {
+                (
+                    client_cfg.tendermint_rpc_url,
+                    format!("storage client config '{}'", client_config_path.display()),
+                )
+            }
+        } else if provider_config_path.exists() {
+            let config = StorageConfig::load(&provider_config_path).with_context(|| {
+                format!(
+                    "failed to load provider storage config at {}",
+                    provider_config_path.display()
+                )
+            })?;
+            if args.address.is_none() {
+                let secret_key = SignedMessageFactory::read_secret_key(&config.secret_key_file)
+                    .with_context(|| {
+                        format!(
+                            "failed to read provider secret key from {}",
+                            config.secret_key_file.display()
+                        )
+                    })?;
+                // Keep query defaults consistent with write paths: use delegated (f410) address.
+                let eth_addr = EthAddress::new_secp256k1(&secret_key.public_key().serialize())
+                    .context("failed to derive delegated address from provider secret key")?;
+                default_query_addr = Some(
+                    Address::new_delegated(10, &eth_addr.0)
+                        .context("failed to construct delegated query address")?,
+                );
+            }
+            if let Some(url) = args.rpc_url.clone() {
+                (url, "--rpc-url".to_string())
+            } else {
+                (
+                    config.tendermint_rpc_url,
+                    format!("storage provider config '{}'", provider_config_path.display()),
+                )
+            }
+        } else {
+            if args.address.is_none() {
+                let provider = crate::commands::get_ipc_provider(global).context(
+                    "failed to load IPC provider config to infer default wallet address",
+                )?;
+                if let Ok(wallet) = provider.evm_wallet() {
+                    let mut wallet = wallet.write().unwrap();
+                    if let Some(default_evm) = wallet.get_default()? {
+                        let eth_addr: ethers::types::Address = default_evm.clone().into();
+                        default_query_addr =
+                            Some(ipc_api::ethers_address_to_fil_address(&eth_addr)?);
+                    }
+                }
+            }
+            if let Some(url) = args.rpc_url.clone() {
+                (url, "--rpc-url".to_string())
+            } else {
+                (
+                    "http://127.0.0.1:26657".to_string(),
+                    "default localhost RPC".to_string(),
+                )
+            }
+        };
+        let rpc_endpoint = rpc_url.parse().with_context(|| {
+            format!(
+                "Invalid Tendermint RPC URL '{}' from {}",
+                rpc_url, rpc_source
+            )
+        })?;
+        let fm_client = FendermintClient::new_http(rpc_endpoint, None)?;
+
+        // Determine the address to query.
+        let query_addr = if let Some(ref addr_str) = args.address {
+            crate::require_fil_addr_from_str(addr_str)?
+        } else if let Some(addr) = default_query_addr {
+            addr
+        } else {
+            return Err(anyhow!(
+                "No default address available. For user mode, pass --address (and optionally --rpc-url). \
+                 For provider mode, run 'ipc-cli storage node init' to generate storage config."
+            ));
+        };
+
+        // Query the GetAccount method on the blobs actor
+        let params_bytes =
+            RawBytes::serialize(query_addr).context("Failed to serialize address")?;
+
+        let msg = fvm_shared::message::Message {
+            version: Default::default(),
+            from: fendermint_vm_actor_interface::system::SYSTEM_ACTOR_ADDR,
+            to: BLOBS_ACTOR_ADDR,
+            sequence: 0,
+            value: TokenAmount::zero(),
+            method_num: BlobsMethod::GetAccount as u64,
+            params: params_bytes,
+            gas_limit: 10_000_000_000,
+            gas_fee_cap: TokenAmount::zero(),
+            gas_premium: TokenAmount::zero(),
+        };
+
+        let response = tokio::time::timeout(
+            Duration::from_secs(15),
+            fm_client.call(msg, FvmQueryHeight::default()),
+        )
+        .await
+        .map_err(|_| {
+            anyhow!(
+                "Timed out after 15s querying GetAccount via Tendermint RPC at {}",
+                rpc_url
+            )
+        })?
+        .with_context(|| {
+            format!(
+                "Failed to query GetAccount (address={} rpc={})",
+                query_addr, rpc_url
+            )
+        })?;
+
+        if response.value.code.is_err() {
+            return Err(anyhow!(
+                "GetAccount query failed (code {:?}): log={} info={} (address={} rpc={})",
+                response.value.code,
+                if response.value.log.is_empty() {
+                    "<empty>"
+                } else {
+                    &response.value.log
+                },
+                if response.value.info.is_empty() {
+                    "<empty>"
+                } else {
+                    &response.value.info
+                },
+                query_addr,
+                rpc_url
+            ));
+        }
+
+        let return_data = fendermint_rpc::response::decode_data(&response.value.data)
+            .context("Failed to decode response data")?;
+
+        let account: Option<Account> =
+            fvm_ipld_encoding::from_slice(&return_data).context("Failed to decode Account")?;
+
+        match account {
+            Some(acct) => {
+                if args.json {
+                    let output = serde_json::json!({
+                        "address": query_addr.to_string(),
+                        "capacity_used": acct.capacity_used,
+                        "credit_free": acct.credit_free.atto().to_string(),
+                        "credit_committed": acct.credit_committed.atto().to_string(),
+                        "credit_sponsor": acct.credit_sponsor.map(|a| a.to_string()),
+                        "last_debit_epoch": acct.last_debit_epoch,
+                        "max_ttl": acct.max_ttl,
+                        "gas_allowance": acct.gas_allowance.atto().to_string(),
+                        "approvals_to": acct.approvals_to.len(),
+                        "approvals_from": acct.approvals_from.len(),
+                    });
+                    println!("{}", serde_json::to_string_pretty(&output)?);
+                } else {
+                    println!("Account: {}", query_addr);
+                    println!("  Capacity used: {} bytes", acct.capacity_used);
+                    println!("  Credit free: {}", acct.credit_free);
+                    println!("  Credit committed: {}", acct.credit_committed);
+                    if let Some(sponsor) = &acct.credit_sponsor {
+                        println!("  Credit sponsor: {}", sponsor);
+                    }
+                    println!("  Last debit epoch: {}", acct.last_debit_epoch);
+                    println!("  Max TTL: {} epochs", acct.max_ttl);
+                    println!("  Gas allowance: {}", acct.gas_allowance);
+                    println!(
+                        "  Approvals: {} outgoing, {} incoming",
+                        acct.approvals_to.len(),
+                        acct.approvals_from.len()
+                    );
+                }
+            }
+            None => {
+                println!("No account found for {}", query_addr);
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/ipc/cli/src/commands/storage/client/ls.rs b/ipc/cli/src/commands/storage/client/ls.rs
new file mode 100644
index 0000000000..f1e51f33c4
--- /dev/null
+++ b/ipc/cli/src/commands/storage/client/ls.rs
@@ -0,0 +1,225 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: MIT
+
+//! List command for storage operations
+
+use anyhow::{anyhow, Context, Result};
+use clap::Args;
+use std::path::PathBuf;
+
+use fendermint_rpc::client::FendermintClient;
+use serde_json::json;
+
+use async_trait::async_trait;
+
+use crate::commands::storage::{
+    bucket,
+    config::{resolve_client_config_path, StorageClientConfig, StorageConfig},
+    path,
+};
+use crate::{CommandLineHandler, GlobalArguments};
+
+#[derive(Debug, Args)]
+pub struct ListArgs {
+    /// Storage path (ipc://bucket_address/prefix)
+    #[arg(value_name = "PATH")]
+    pub path: String,
+
+    /// Storage config file
+    #[arg(long)]
+    pub config: Option<PathBuf>,
+
+    /// Output in JSON format
+    #[arg(long)]
+    pub json: bool,
+
+    /// Show all details
+    #[arg(short, long)]
+    pub long: bool,
+
+    /// Delimiter for hierarchical listing (default: none, S3-style: "/")
+    #[arg(short, long)]
+    pub delimiter: Option<String>,
+
+    /// Maximum number of objects to list
+    #[arg(long, default_value = "100")]
+    pub limit: u64,
+}
+
+pub struct ListStorage;
+
+#[async_trait]
+impl CommandLineHandler for ListStorage {
+    type Arguments = ListArgs;
+
+    async fn handle(_global: &GlobalArguments, args: &Self::Arguments) -> Result<()> {
+        let storage_path = path::StoragePath::parse(&args.path)?;
+
+        // Load client/provider config and resolve RPC endpoint.
+        let config_path = resolve_client_config_path(args.config.clone());
+        let rpc_url = if config_path.exists() {
+            if let Ok(client_cfg) = StorageClientConfig::load(&config_path) {
+                client_cfg.tendermint_rpc_url
+            } else {
+                StorageConfig::load(&config_path)?.tendermint_rpc_url
+            }
+        } else {
+            return Err(anyhow!(
+                "Storage config not found at {}. Run 'ipc-cli storage client init' (user) or 'ipc-cli storage node init' (provider).",
+                config_path.display()
+            ));
+        };
+
+        // Create FendermintClient
+        let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?;
+
+        // List objects
+        let prefix = if storage_path.is_bucket_root() {
+            None
+        } else {
+            Some(storage_path.key.clone())
+        };
+
+        let list_result = bucket::list_objects(
+            &fm_client,
+            storage_path.bucket_address,
+            prefix.clone(),
+            args.delimiter.clone(),
+            None, // start_key
+            args.limit,
+        )
+        .await
+        .with_context(|| {
+            format!(
+                "Failed to list objects (bucket={} prefix={} delimiter={} limit={} rpc={})",
+                storage_path.bucket_address,
+                prefix.as_deref().unwrap_or(""),
+                args.delimiter.as_deref().unwrap_or(""),
+                args.limit,
+                rpc_url
+            )
+        })?;
+
+        // Output results
+        if args.json {
+            print_json(&list_result, &prefix)?;
+        } else {
+            print_table(&list_result, &prefix, args.long)?;
+        }
+
+        Ok(())
+    }
+}
+
+fn print_json(
+    result: &fendermint_actor_bucket::ListObjectsReturn,
+    _prefix: &Option<String>,
+) -> Result<()> {
+    let mut objects = Vec::new();
+    for (key, state) in &result.objects {
+        let key_str = String::from_utf8_lossy(key);
+        objects.push(json!({
+            "key": key_str,
+            "hash": format!("0x{}", hex::encode(state.hash.0)),
+            "size": state.size,
+            "expiry": state.expiry,
+            "metadata": state.metadata,
+        }));
+    }
+
+    let mut prefixes = Vec::new();
+    for prefix in &result.common_prefixes {
+        prefixes.push(String::from_utf8_lossy(prefix).to_string());
+    }
+
+    let output = json!({
+        "objects": objects,
+        "common_prefixes": prefixes,
+        "next_key": result.next_key.as_ref().map(|k| String::from_utf8_lossy(k).to_string()),
+    });
+
+    println!("{}", serde_json::to_string_pretty(&output)?);
+    Ok(())
+}
+
+fn print_table(
+    result: &fendermint_actor_bucket::ListObjectsReturn,
+    _prefix: &Option<String>,
+    long: bool,
+) -> Result<()> {
+    if result.objects.is_empty() && result.common_prefixes.is_empty() {
+        println!("No objects found");
+        return Ok(());
+    }
+
+    
// Print common prefixes (directories) first + if !result.common_prefixes.is_empty() { + if long { + println!("{:<50} {:<10} {:<66}", "KEY", "SIZE", "HASH"); + println!("{}", "-".repeat(130)); + } + + for prefix_bytes in &result.common_prefixes { + let prefix_str = String::from_utf8_lossy(prefix_bytes); + if long { + println!("{:<50} {:<10} {:<66}", prefix_str, "DIR", "-"); + } else { + println!("{}", prefix_str); + } + } + } + + // Print objects + if !result.objects.is_empty() { + if long && result.common_prefixes.is_empty() { + println!("{:<50} {:<10} {:<66}", "KEY", "SIZE", "HASH"); + println!("{}", "-".repeat(130)); + } + + for (key, state) in &result.objects { + let key_str = String::from_utf8_lossy(key); + let hash_str = format!("0x{}", hex::encode(&state.hash.0[..8])); // Truncated hash + + if long { + println!( + "{:<50} {:<10} {:<66}", + key_str, + format_size(state.size), + hash_str + ); + } else { + println!("{}", key_str); + } + } + } + + // Print pagination info + if let Some(next_key) = &result.next_key { + let next_key_str = String::from_utf8_lossy(next_key); + println!("\n(More results available, next key: {})", next_key_str); + } + + println!( + "\nTotal: {} objects, {} prefixes", + result.objects.len(), + result.common_prefixes.len() + ); + + Ok(()) +} + +fn format_size(bytes: u64) -> String { + const KB: u64 = 1024; + const MB: u64 = KB * 1024; + const GB: u64 = MB * 1024; + + if bytes >= GB { + format!("{:.1} GB", bytes as f64 / GB as f64) + } else if bytes >= MB { + format!("{:.1} MB", bytes as f64 / MB as f64) + } else if bytes >= KB { + format!("{:.1} KB", bytes as f64 / KB as f64) + } else { + format!("{} B", bytes) + } +} diff --git a/ipc/cli/src/commands/storage/client/mod.rs b/ipc/cli/src/commands/storage/client/mod.rs new file mode 100644 index 0000000000..a75cbb8697 --- /dev/null +++ b/ipc/cli/src/commands/storage/client/mod.rs @@ -0,0 +1,268 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +use 
anyhow::{anyhow, Context, Result}; +use clap::{Args, Subcommand}; +use fendermint_rpc::client::FendermintClient; +use fvm_shared::address::Address; +use std::path::PathBuf; +use url::Url; + +pub mod cat; +pub mod credit; +pub mod ls; +pub mod stat; +pub mod bucket; +pub mod cp; +pub mod mv; +pub mod rm; +pub mod sync; + +use crate::commands::storage::config::{ + default_storage_client_config_path, resolve_client_config_path, StorageClientConfig, +}; +use crate::commands::storage::client::{ + bucket::BucketCommandArgs, + cat::{CatArgs, CatStorage}, + cp::{CopyArgs, CopyStorage}, + credit::CreditCommandArgs, + ls::{ListArgs, ListStorage}, + mv::{MoveArgs, MoveStorage}, + rm::{RemoveArgs, RemoveStorage}, + stat::{StatArgs, StatStorage}, + sync::{SyncArgs, SyncStorage}, +}; +use crate::{CommandLineHandler, GlobalArguments}; + +#[derive(Debug, Args)] +#[command(name = "client", about = "Storage client operations and config")] +pub struct StorageClientCommandArgs { + #[command(subcommand)] + command: StorageClientCommands, +} + +#[derive(Debug, Subcommand)] +pub enum StorageClientCommands { + /// Create and manage storage buckets + Bucket(BucketCommandArgs), + /// Buy and query storage credits + Credit(CreditCommandArgs), + /// Copy files to/from storage + Cp(CopyArgs), + /// List objects in storage + Ls(ListArgs), + /// Display file contents from storage + Cat(CatArgs), + /// Show object metadata + Stat(StatArgs), + /// Remove objects from storage + Rm(RemoveArgs), + /// Move/rename objects in storage + Mv(MoveArgs), + /// Sync directories with storage + Sync(SyncArgs), + /// Initialize storage client config + Init(StorageClientInitArgs), + /// Show effective storage client config + Show(StorageClientShowArgs), + /// Update storage client config values + Set(StorageClientSetArgs), +} + +impl StorageClientCommandArgs { + pub async fn handle(&self, global: &GlobalArguments) -> anyhow::Result<()> { + match &self.command { + StorageClientCommands::Bucket(args) => 
args.handle(global).await, + StorageClientCommands::Credit(args) => args.handle(global).await, + StorageClientCommands::Cp(args) => CopyStorage::handle(global, args).await, + StorageClientCommands::Ls(args) => ListStorage::handle(global, args).await, + StorageClientCommands::Cat(args) => CatStorage::handle(global, args).await, + StorageClientCommands::Stat(args) => StatStorage::handle(global, args).await, + StorageClientCommands::Rm(args) => RemoveStorage::handle(global, args).await, + StorageClientCommands::Mv(args) => MoveStorage::handle(global, args).await, + StorageClientCommands::Sync(args) => SyncStorage::handle(global, args).await, + StorageClientCommands::Init(args) => init_client_config(args), + StorageClientCommands::Show(args) => show_client_config(args), + StorageClientCommands::Set(args) => set_client_config(args), + } + } +} + +#[derive(Debug, Args)] +pub struct StorageClientInitArgs { + /// Output path for storage client config + #[arg(long)] + pub out: Option, + /// Tendermint RPC URL + #[arg(long, default_value = "http://127.0.0.1:26657")] + pub rpc_url: String, + /// Gateway URL + #[arg(long)] + pub gateway_url: Option, + /// Default account address + #[arg(long)] + pub address: Option, +} + +#[derive(Debug, Args)] +pub struct StorageClientShowArgs { + /// Storage client config path + #[arg(long)] + pub config: Option, +} + +#[derive(Debug, Args)] +pub struct StorageClientSetArgs { + /// Storage client config path + #[arg(long)] + pub config: Option, + /// Tendermint RPC URL + #[arg(long)] + pub rpc_url: Option, + /// Gateway URL + #[arg(long)] + pub gateway_url: Option, + /// Default account address + #[arg(long)] + pub address: Option, +} + +fn init_client_config(args: &StorageClientInitArgs) -> Result<()> { + validate_http_url("rpc-url", &args.rpc_url)?; + if let Some(gateway_url) = &args.gateway_url { + validate_http_url("gateway-url", gateway_url)?; + } + + let path = args + .out + .clone() + 
.unwrap_or_else(default_storage_client_config_path); + let cfg = StorageClientConfig { + tendermint_rpc_url: args.rpc_url.clone(), + gateway_url: args.gateway_url.clone(), + address: args.address.clone(), + }; + cfg.save(&path)?; + println!("Storage client config written to {}", path.display()); + Ok(()) +} + +fn show_client_config(args: &StorageClientShowArgs) -> Result<()> { + let path = resolve_client_config_path(args.config.clone()); + let cfg = if path.exists() { + StorageClientConfig::load(&path)? + } else { + StorageClientConfig::default_with_local_rpc() + }; + println!("{}", serde_yaml::to_string(&cfg)?); + Ok(()) +} + +fn set_client_config(args: &StorageClientSetArgs) -> Result<()> { + let path = resolve_client_config_path(args.config.clone()); + let mut cfg = if path.exists() { + StorageClientConfig::load(&path)? + } else { + StorageClientConfig::default_with_local_rpc() + }; + if let Some(v) = &args.rpc_url { + validate_http_url("rpc-url", v)?; + cfg.tendermint_rpc_url = v.clone(); + } + if let Some(v) = &args.gateway_url { + validate_http_url("gateway-url", v)?; + cfg.gateway_url = Some(v.clone()); + } + if let Some(v) = &args.address { + cfg.address = Some(v.clone()); + } + cfg.save(&path)?; + println!("Updated storage client config at {}", path.display()); + Ok(()) +} + +/// Download object data from the gateway, with a hash-based fallback. +/// +/// Primary path: `GET /v1/objects/{bucket}/{key}` — gateway resolves the key to a blob hash +/// on-chain then fetches via shard retrieval. +/// +/// Fallback: if the primary path returns 404 (common when the blob hasn't been confirmed in +/// the blobs actor yet), we query `ListObjects` directly to retrieve the hash from bucket +/// state, then try `GET /v1/blobs/{hash}` on the gateway. +/// +/// If both paths fail, the error explains what was attempted so the caller can diagnose. 
+pub(crate) async fn download_object_data(
+    gateway: &crate::commands::storage::gateway::GatewayClient,
+    gateway_url: &str,
+    fm_client: &FendermintClient,
+    rpc_url: &str,
+    bucket_address: Address,
+    key: &str,
+) -> Result<Vec<u8>> {
+    match gateway.download_object(&bucket_address, key, None).await {
+        Ok(data) => return Ok(data),
+        Err(primary_err) => {
+            // The bucket actor's GetObject also verifies blob liveness in the blobs actor.
+            // ListObjects reads bucket state directly and is not gated on blob liveness.
+            let listed = crate::commands::storage::bucket::list_objects(
+                fm_client,
+                bucket_address,
+                Some(key.to_string()),
+                None,
+                None,
+                16,
+            )
+            .await
+            .with_context(|| {
+                format!(
+                    "Primary download failed ({primary_err}); also failed to query on-chain \
+                     object metadata as fallback (bucket={bucket_address} key={key} rpc={rpc_url})"
+                )
+            })?;
+
+            let key_bytes = key.as_bytes();
+            let obj = listed
+                .objects
+                .iter()
+                .find(|(k, _)| k.as_slice() == key_bytes)
+                .map(|(_, o)| o)
+                .ok_or_else(|| {
+                    anyhow!(
+                        "Object not found on-chain (bucket={} key={}). \
+                         If you just uploaded it, storage nodes may still be confirming the blob. \
+                         Primary gateway error: {}",
+                        bucket_address,
+                        key,
+                        primary_err
+                    )
+                })?;
+
+            let blob_hash = hex::encode(obj.hash.0);
+            gateway
+                .download_blob(&blob_hash, None)
+                .await
+                .with_context(|| {
+                    format!(
+                        "Object is registered on-chain (hash=0x{blob_hash}) but the gateway \
+                         could not retrieve the blob. Storage nodes may still be distributing \
+                         shards or the blob may have expired. \
+                         Gateway: {gateway_url} Bucket: {bucket_address} Key: {key}"
+                    )
+                })
+        }
+    }
+}
+
+fn validate_http_url(field_name: &str, value: &str) -> Result<()> {
+    let parsed = Url::parse(value)
+        .map_err(|e| anyhow::anyhow!("invalid {} '{}': {}", field_name, value, e))?;
+    match parsed.scheme() {
+        "http" | "https" => Ok(()),
+        scheme => Err(anyhow::anyhow!(
+            "invalid {} '{}': unsupported scheme '{}', expected http or https",
+            field_name,
+            value,
+            scheme
+        )),
+    }
+}
diff --git a/ipc/cli/src/commands/storage/client/mv.rs b/ipc/cli/src/commands/storage/client/mv.rs
new file mode 100644
index 0000000000..70062aef8a
--- /dev/null
+++ b/ipc/cli/src/commands/storage/client/mv.rs
@@ -0,0 +1,214 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: MIT
+
+//! Move/rename command for storage objects
+
+use anyhow::{anyhow, Context, Result};
+use clap::Args;
+use std::path::PathBuf;
+
+use async_trait::async_trait;
+use fendermint_rpc::client::FendermintClient;
+use fendermint_rpc::message::SignedMessageFactory;
+use fendermint_rpc::QueryClient;
+use fendermint_vm_actor_interface::eam::EthAddress;
+use fvm_shared::address::Address;
+use fvm_shared::chainid::ChainID;
+
+use crate::commands::storage::{
+    bucket,
+    client_context::resolve_write_context,
+    config::{resolve_client_gateway_url},
+    gateway::GatewayClient,
+    path,
+};
+use crate::{CommandLineHandler, GlobalArguments};
+
+#[derive(Debug, Args)]
+pub struct MoveArgs {
+    /// Source storage path (ipc://bucket_address/path/to/file)
+    #[arg(value_name = "SOURCE")]
+    pub source: String,
+
+    /// Destination storage path (ipc://bucket_address/path/to/newfile)
+    #[arg(value_name = "DEST")]
+    pub dest: String,
+
+    /// Gateway URL (overrides config and env var)
+    #[arg(long)]
+    pub gateway: Option<String>,
+
+    /// Storage config file
+    #[arg(long)]
+    pub config: Option<PathBuf>,
+}
+
+pub struct MoveStorage;
+
+#[async_trait]
+impl CommandLineHandler for MoveStorage {
+    type Arguments = MoveArgs;
+
+    async fn handle(global: &GlobalArguments, args: &Self::Arguments) -> Result<()> {
+        let source_path = path::StoragePath::parse(&args.source)?;
+        let dest_path = path::StoragePath::parse(&args.dest)?;
+
+        if source_path.is_bucket_root() || dest_path.is_bucket_root() {
+            return Err(anyhow!(
+                "Paths must include file keys, not just bucket addresses"
+            ));
+        }
+
+        println!("Moving {} -> {}", source_path.to_uri(), dest_path.to_uri());
+
+        let write_ctx = resolve_write_context(global, args.config.clone())?;
+        let rpc_url = write_ctx.rpc_url;
+        let secret_key = write_ctx.secret_key;
+
+        // Create clients
+        let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?;
+
+        // Get source object metadata via list_objects (reliable — no liveness check)
+        let listed = bucket::list_objects(
+            &fm_client,
+            source_path.bucket_address,
+            Some(source_path.key.clone()),
+            None,
+            None,
+            16,
+        )
+        .await
+        .with_context(|| {
+            format!(
+                "Failed to query source object (bucket={} key={} rpc={})",
+                source_path.bucket_address, source_path.key, rpc_url
+            )
+        })?;
+
+        let key_bytes = source_path.key.as_bytes();
+        let source_object = listed
+            .objects
+            .iter()
+            .find(|(k, _)| k.as_slice() == key_bytes)
+            .map(|(_, o)| o)
+            .ok_or_else(|| anyhow!("Source object not found: {}", source_path.key))?;
+
+        // Query chain ID from the network
+        let chain_id = bucket::query_chain_id(&fm_client)
+            .await
+            .with_context(|| format!("Failed to query chain ID from rpc={}", rpc_url))?;
+
+        let pub_key = secret_key.public_key();
+        let eth_addr = EthAddress::new_secp256k1(&pub_key.serialize())
+            .context("failed to derive delegated address")?;
+        let addr =
+            Address::new_delegated(10, &eth_addr.0).context("failed to construct f410 address")?;
+        let state = fm_client
+            .actor_state(
+                &addr,
+                fendermint_vm_message::query::FvmQueryHeight::default(),
+            )
+            .await
+            .with_context(|| format!("Failed to get actor state for sender {} via rpc={}", addr, rpc_url))?;
+        let sequence = state.value.map(|(_, s)|
s.sequence).ok_or_else(|| { + anyhow!( + "sender actor {} does not exist on-chain at {}. Fund/initialize this delegated \ + address first.", + addr, + rpc_url + ) + })?; + + // Check if the destination already exists before binding (fm_client is consumed by bind). + // mv semantics are to replace the destination, but addObject has no overwrite flag in the + // EVM interface, so we delete the destination first when it exists. + let dest_exists = if source_path.bucket_address == dest_path.bucket_address { + let dest_listed = bucket::list_objects( + &fm_client, + dest_path.bucket_address, + Some(dest_path.key.clone()), + None, + None, + 2, + ) + .await + .context("Failed to check destination existence")?; + let dest_key_bytes = dest_path.key.as_bytes(); + dest_listed + .objects + .iter() + .any(|(k, _)| k.as_slice() == dest_key_bytes) + } else { + false + }; + + let mf = SignedMessageFactory::new(secret_key, addr, sequence, ChainID::from(chain_id)); + let mut bound_client = fm_client.bind(mf); + + // If moving within the same bucket, we can reuse the blob hash + if source_path.bucket_address == dest_path.bucket_address { + println!("Moving within same bucket (reusing blob)..."); + + if dest_exists { + bucket::delete_object( + &mut bound_client, + dest_path.bucket_address, + dest_path.key.clone(), + ) + .await + .context("Failed to clear existing destination object")?; + } + + // Get source node ID from gateway + let gateway_url = + resolve_client_gateway_url(args.gateway.as_deref(), args.config.clone(), true)?; + let gateway_client = GatewayClient::new(gateway_url.clone())?; + let node_info = gateway_client.get_node_info().await.with_context(|| { + format!( + "Failed to fetch gateway node info from {} (needed for move source node id)", + gateway_url + ) + })?; + let source_node = + bucket::hash_to_b256(&node_info.node_id).context("Invalid node ID from gateway")?; + + bucket::add_object( + &mut bound_client, + dest_path.bucket_address, + source_node, + 
dest_path.key.clone(), + source_object.hash, + source_object.hash, // recovery_hash: reuse blob hash (ObjectState has no separate recovery_hash) + source_object.size, + source_object.metadata.clone(), + 4, // data_shards - default + 2, // parity_shards - default + ) + .await + .context("Failed to add object at destination")?; + + println!("āœ“ Added at destination: {}", dest_path.key); + + // Delete from source location + bucket::delete_object( + &mut bound_client, + source_path.bucket_address, + source_path.key.clone(), + ) + .await + .context("Failed to delete source object")?; + + println!("āœ“ Deleted from source: {}", source_path.key); + } else { + // Moving across buckets requires re-upload + println!("Moving across buckets requires downloading and re-uploading..."); + return Err(anyhow!( + "Cross-bucket move not yet implemented. Use 'cp' followed by 'rm' instead." + )); + } + + println!("āœ“ Move complete"); + + Ok(()) + } +} diff --git a/ipc/cli/src/commands/storage/client/rm.rs b/ipc/cli/src/commands/storage/client/rm.rs new file mode 100644 index 0000000000..267a4cce1d --- /dev/null +++ b/ipc/cli/src/commands/storage/client/rm.rs @@ -0,0 +1,276 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +//! 
Remove command for deleting objects from storage + +use anyhow::{anyhow, Context, Result}; +use clap::Args; +use std::io::{self, Write}; +use std::path::PathBuf; + +use async_trait::async_trait; +use fendermint_rpc::client::FendermintClient; +use fendermint_rpc::message::SignedMessageFactory; +use fendermint_rpc::QueryClient; +use fendermint_vm_actor_interface::eam::EthAddress; +use fvm_shared::address::Address; +use fvm_shared::chainid::ChainID; + +use crate::commands::storage::{bucket, client_context::resolve_write_context, path}; +use crate::{CommandLineHandler, GlobalArguments}; + +#[derive(Debug, Args)] +pub struct RemoveArgs { + /// Storage path (ipc://bucket_address/path/to/file) + #[arg(value_name = "PATH")] + pub path: String, + + /// Storage config file + #[arg(long)] + pub config: Option<PathBuf>, + + /// Recursive delete (for prefix-based deletion) + #[arg(short, long)] + pub recursive: bool, + + /// Force deletion without confirmation + #[arg(short, long)] + pub force: bool, +} + +pub struct RemoveStorage; + +#[async_trait] +impl CommandLineHandler for RemoveStorage { + type Arguments = RemoveArgs; + + async fn handle(global: &GlobalArguments, args: &Self::Arguments) -> Result<()> { + let storage_path = path::StoragePath::parse(&args.path)?; + + if storage_path.is_bucket_root() { + return Err(anyhow!( + "Cannot delete entire bucket. Specify a key or prefix." + )); + } + + // Handle recursive deletion + if args.recursive { + return delete_recursive(global, &storage_path, args).await; + } + + // Single file deletion + delete_file(global, &storage_path, args).await + } +} + +async fn delete_file( + global: &GlobalArguments, + storage_path: &path::StoragePath, + args: &RemoveArgs, +) -> Result<()> { + // Confirm deletion unless --force + if !args.force { + print!("Delete {}? 
[y/N] ", storage_path.to_uri()); + io::stdout().flush()?; + + let mut input = String::new(); + io::stdin().read_line(&mut input)?; + + if !input.trim().eq_ignore_ascii_case("y") { + println!("Aborted"); + return Ok(()); + } + } + + let write_ctx = resolve_write_context(global, args.config.clone())?; + let rpc_url = write_ctx.rpc_url; + let secret_key = write_ctx.secret_key; + + // Create FendermintClient and bound client + let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?; + + // Query chain ID from the network + let chain_id = bucket::query_chain_id(&fm_client) + .await + .with_context(|| format!("Failed to query chain ID from rpc={}", rpc_url))?; + + let pub_key = secret_key.public_key(); + let eth_addr = EthAddress::new_secp256k1(&pub_key.serialize()) + .context("failed to derive delegated address")?; + let addr = + Address::new_delegated(10, &eth_addr.0).context("failed to construct f410 address")?; + let state = fm_client + .actor_state( + &addr, + fendermint_vm_message::query::FvmQueryHeight::default(), + ) + .await + .with_context(|| format!("Failed to get actor state for sender {} via rpc={}", addr, rpc_url))?; + let sequence = state.value.map(|(_, s)| s.sequence).ok_or_else(|| { + anyhow!( + "sender actor {} does not exist on-chain at {}. 
Fund/initialize this delegated \ + address first.", + addr, + rpc_url + ) + })?; + + let mf = SignedMessageFactory::new(secret_key, addr, sequence, ChainID::from(chain_id)); + let mut bound_client = fm_client.bind(mf); + + // Delete object + println!("Deleting {}...", storage_path.key); + + bucket::delete_object( + &mut bound_client, + storage_path.bucket_address, + storage_path.key.clone(), + ) + .await + .with_context(|| { + format!( + "Failed to delete object (bucket={} key={} sender={} rpc={})", + storage_path.bucket_address, storage_path.key, addr, rpc_url + ) + })?; + + println!("āœ“ Deleted: {}", storage_path.key); + + Ok(()) +} + +async fn delete_recursive( + global: &GlobalArguments, + storage_path: &path::StoragePath, + args: &RemoveArgs, +) -> Result<()> { + let write_ctx = resolve_write_context(global, args.config.clone())?; + let rpc_url = write_ctx.rpc_url; + let secret_key = write_ctx.secret_key; + + // List all objects with the prefix + let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?; + + let prefix = storage_path.key.clone(); + let mut deleted_count = 0u64; + let mut failed_count = 0u64; + let mut start_key = None; + + loop { + let list_result = bucket::list_objects( + &fm_client, + storage_path.bucket_address, + Some(prefix.clone()), + None, // no delimiter for recursive + start_key, + 100, // batch size + ) + .await + .with_context(|| { + format!( + "Failed to list objects for recursive delete (bucket={} prefix={} rpc={})", + storage_path.bucket_address, prefix, rpc_url + ) + })?; + + if list_result.objects.is_empty() { + break; + } + + // Confirm deletion unless --force + if !args.force && deleted_count == 0 { + println!( + "Found {} objects to delete with prefix: {}", + list_result.objects.len(), + prefix + ); + print!("Continue? 
[y/N] "); + io::stdout().flush()?; + + let mut input = String::new(); + io::stdin().read_line(&mut input)?; + + if !input.trim().eq_ignore_ascii_case("y") { + println!("Aborted"); + return Ok(()); + } + } + + // Delete each object + let chain_id = bucket::query_chain_id(&fm_client) + .await + .with_context(|| format!("Failed to query chain ID from rpc={}", rpc_url))?; + + let pub_key = secret_key.public_key(); + let eth_addr = EthAddress::new_secp256k1(&pub_key.serialize()) + .context("failed to derive delegated address")?; + let addr = + Address::new_delegated(10, &eth_addr.0).context("failed to construct f410 address")?; + let state = fm_client + .actor_state( + &addr, + fendermint_vm_message::query::FvmQueryHeight::default(), + ) + .await + .with_context(|| format!("Failed to get actor state for sender {} via rpc={}", addr, rpc_url))?; + let sequence = state.value.map(|(_, s)| s.sequence).ok_or_else(|| { + anyhow!( + "sender actor {} does not exist on-chain at {}. Fund/initialize this delegated \ + address first.", + addr, + rpc_url + ) + })?; + + let mf = SignedMessageFactory::new(secret_key.clone(), addr, sequence, ChainID::from(chain_id)); + let mut bound_client = fm_client.clone().bind(mf); + + for (key, _) in &list_result.objects { + let key_str = String::from_utf8_lossy(key).to_string(); + match bucket::delete_object( + &mut bound_client, + storage_path.bucket_address, + key_str.clone(), + ) + .await + { + Ok(()) => { + println!("āœ“ Deleted: {}", key_str); + deleted_count += 1; + } + Err(e) => { + eprintln!("⚠ Skipped {}: {:#}", key_str, e); + failed_count += 1; + } + } + } + + // Check if there are more pages + if list_result.next_key.is_none() { + break; + } + + start_key = list_result + .next_key + .map(|k| String::from_utf8_lossy(&k).to_string()); + } + + if deleted_count == 0 && failed_count > 0 { + return Err(anyhow!( + "Could not delete any of the {} matching objects (blobs may still be pending finalization)", + failed_count + )); + } + + println!( 
"\nDeleted {} objects{}", + deleted_count, + if failed_count > 0 { + format!(" ({} skipped — blobs pending finalization)", failed_count) + } else { + String::new() + } + ); + + Ok(()) +} diff --git a/ipc/cli/src/commands/storage/client/stat.rs b/ipc/cli/src/commands/storage/client/stat.rs new file mode 100644 index 0000000000..e2815a134f --- /dev/null +++ b/ipc/cli/src/commands/storage/client/stat.rs @@ -0,0 +1,156 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +//! Stat command for displaying object metadata from storage + +use anyhow::{anyhow, Context, Result}; +use clap::Args; +use std::path::PathBuf; + +use fendermint_actor_bucket::ObjectState; +use fendermint_rpc::client::FendermintClient; +use serde_json::json; + +use async_trait::async_trait; + +use crate::commands::storage::{ + bucket, + config::{resolve_client_config_path, StorageClientConfig, StorageConfig}, + path, +}; +use crate::{CommandLineHandler, GlobalArguments}; + +#[derive(Debug, Args)] +pub struct StatArgs { + /// Storage path (ipc://bucket_address/path/to/file) + #[arg(value_name = "PATH")] + pub path: String, + + /// Storage config file + #[arg(long)] + pub config: Option, + + /// Output in JSON format + #[arg(long)] + pub json: bool, +} + +pub struct StatStorage; + +#[async_trait] +impl CommandLineHandler for StatStorage { + type Arguments = StatArgs; + + async fn handle(_global: &GlobalArguments, args: &Self::Arguments) -> Result<()> { + let storage_path = path::StoragePath::parse(&args.path)?; + + if storage_path.is_bucket_root() { + return Err(anyhow!( + "Path must include a file key, not just bucket address" + )); + } + + let config_path = resolve_client_config_path(args.config.clone()); + let rpc_url = if config_path.exists() { + if let Ok(client_cfg) = StorageClientConfig::load(&config_path) { + client_cfg.tendermint_rpc_url + } else { + StorageConfig::load(&config_path)?.tendermint_rpc_url + } + } else { + return Err(anyhow!( + "Storage config not found at 
{}. Run 'ipc-cli storage client init'.", + config_path.display() + )); + }; + + let fm_client = FendermintClient::new_http(rpc_url.parse()?, None)?; + + // Use ListObjects with the exact key as prefix to read object metadata from bucket + // state. This is more reliable than GetObject, which also verifies blob liveness in + // the blobs actor and may return None while the blob is still being confirmed. + let listed = bucket::list_objects( + &fm_client, + storage_path.bucket_address, + Some(storage_path.key.clone()), + None, + None, + 16, + ) + .await + .with_context(|| { + format!( + "Failed to query object metadata (bucket={} key={} rpc={})", + storage_path.bucket_address, storage_path.key, rpc_url + ) + })?; + + let key_bytes = storage_path.key.as_bytes(); + let obj = listed + .objects + .iter() + .find(|(k, _)| k.as_slice() == key_bytes) + .map(|(_, o)| o) + .ok_or_else(|| anyhow!("Object not found: {}", storage_path.key))?; + + if args.json { + print_json(&storage_path, obj)?; + } else { + print_table(&storage_path, obj)?; + } + + Ok(()) + } +} + +fn print_json(storage_path: &path::StoragePath, obj: &ObjectState) -> Result<()> { + let output = json!({ + "bucket": storage_path.bucket_address.to_string(), + "key": storage_path.key, + "hash": format!("0x{}", hex::encode(obj.hash.0)), + "size": obj.size, + "expiry": obj.expiry, + "metadata": obj.metadata, + }); + + println!("{}", serde_json::to_string_pretty(&output)?); + Ok(()) +} + +fn print_table(storage_path: &path::StoragePath, obj: &ObjectState) -> Result<()> { + println!("Object: {}", storage_path.to_uri()); + println!(" Bucket: {}", storage_path.bucket_address); + println!(" Key: {}", storage_path.key); + println!(" Hash: 0x{}", hex::encode(obj.hash.0)); + println!( + " Size: {} bytes ({})", + obj.size, + format_size(obj.size) + ); + println!(" Expiry: block {}", obj.expiry); + + if !obj.metadata.is_empty() { + println!(" Metadata:"); + for (key, value) in &obj.metadata { + println!(" {}: {}", key, value); 
+ } + } + + Ok(()) +} + +fn format_size(bytes: u64) -> String { + const KB: u64 = 1024; + const MB: u64 = KB * 1024; + const GB: u64 = MB * 1024; + + if bytes >= GB { + format!("{:.2} GB", bytes as f64 / GB as f64) + } else if bytes >= MB { + format!("{:.2} MB", bytes as f64 / MB as f64) + } else if bytes >= KB { + format!("{:.2} KB", bytes as f64 / KB as f64) + } else { + format!("{} B", bytes) + } +} diff --git a/ipc/cli/src/commands/storage/client/sync.rs b/ipc/cli/src/commands/storage/client/sync.rs new file mode 100644 index 0000000000..1306a2e2ae --- /dev/null +++ b/ipc/cli/src/commands/storage/client/sync.rs @@ -0,0 +1,94 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +//! Sync command for synchronizing directories with storage + +use anyhow::{anyhow, Result}; +use clap::Args; +use std::path::PathBuf; + +use async_trait::async_trait; + +use crate::commands::storage::client::cp::{CopyArgs, CopyStorage}; +use crate::commands::storage::path; +use crate::{CommandLineHandler, GlobalArguments}; + +#[derive(Debug, Args)] +pub struct SyncArgs { + /// Source path (local directory or ipc://bucket/prefix) + #[arg(value_name = "SOURCE")] + pub source: String, + + /// Destination path (ipc://bucket/prefix or local directory) + #[arg(value_name = "DEST")] + pub dest: String, + + /// Gateway URL (overrides config and env var) + #[arg(long)] + pub gateway: Option, + + /// Storage config file + #[arg(long)] + pub config: Option, + + /// Dry run (show what would be synced) + #[arg(long)] + pub dry_run: bool, + + /// Delete files in destination that don't exist in source + #[arg(long)] + pub delete: bool, +} + +pub struct SyncStorage; + +#[async_trait] +impl CommandLineHandler for SyncStorage { + type Arguments = SyncArgs; + + async fn handle(global: &GlobalArguments, args: &Self::Arguments) -> Result<()> { + if args.dry_run || args.delete { + return Err(anyhow!( + "sync --dry-run/--delete is not implemented yet; use 'storage client cp -r' for now" 
+ )); + } + let source_is_storage = path::is_storage_path(&args.source); + let dest_is_storage = path::is_storage_path(&args.dest); + + match (source_is_storage, dest_is_storage) { + (false, true) => { + // Local -> Storage: currently mapped to recursive copy. + let cp_args = CopyArgs { + source: args.source.clone(), + dest: args.dest.clone(), + gateway: args.gateway.clone(), + config: args.config.clone(), + recursive: true, + overwrite: true, + }; + CopyStorage::handle(global, &cp_args).await + } + (true, false) => { + // Storage -> Local: currently mapped to recursive copy. + let cp_args = CopyArgs { + source: args.source.clone(), + dest: args.dest.clone(), + gateway: args.gateway.clone(), + config: args.config.clone(), + recursive: true, + overwrite: true, + }; + CopyStorage::handle(global, &cp_args).await + } + (true, true) => { + // Storage -> Storage sync + Err(anyhow!( + "Syncing between storage locations not yet implemented" + )) + } + (false, false) => Err(anyhow!( + "At least one path must be a storage path (ipc://...)" + )), + } + } +} diff --git a/ipc/cli/src/commands/storage/mod.rs b/ipc/cli/src/commands/storage/mod.rs new file mode 100644 index 0000000000..968a38a5c6 --- /dev/null +++ b/ipc/cli/src/commands/storage/mod.rs @@ -0,0 +1,41 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +pub mod client; +pub mod node; +pub mod shared; + +pub use shared::{bucket, client_context, config, gateway, path}; + +use crate::commands::storage::client::StorageClientCommandArgs; +use crate::commands::storage::node::StorageNodeCommandArgs; +use crate::GlobalArguments; +use clap::{Args, Subcommand}; + +#[derive(Debug, Args)] +#[command( + name = "storage", + about = "storage provider (node) and user (client) commands" +)] +#[command(args_conflicts_with_subcommands = true)] +pub(crate) struct StorageCommandsArgs { + #[command(subcommand)] + command: Commands, +} + +impl StorageCommandsArgs { + pub async fn handle(&self, global: 
&GlobalArguments) -> anyhow::Result<()> { + match &self.command { + Commands::Node(args) => args.handle(global).await, + Commands::Client(args) => args.handle(global).await, + } + } +} + +#[derive(Debug, Subcommand)] +pub(crate) enum Commands { + /// Storage provider (node) setup and runtime + Node(StorageNodeCommandArgs), + /// Storage user/client operations and configuration + Client(StorageClientCommandArgs), +} diff --git a/ipc/cli/src/commands/storage/node/init.rs b/ipc/cli/src/commands/storage/node/init.rs new file mode 100644 index 0000000000..3feaff52c7 --- /dev/null +++ b/ipc/cli/src/commands/storage/node/init.rs @@ -0,0 +1,178 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +use crate::commands::node::config::NodeInitConfig; +use crate::commands::storage::config::{ + default_storage_provider_config_path, StorageConfig, StorageRunMode, +}; +use crate::CommandLineHandler; +use anyhow::{Context, Result}; +use async_trait::async_trait; +use clap::Args; +use fendermint_crypto::{to_b64, SecretKey}; +use fendermint_vm_actor_interface::eam::EthAddress; +use fvm_shared::address::{set_current_network, Address, Network}; +use rand::thread_rng; +use std::fs; +use std::path::{Path, PathBuf}; + +pub(crate) struct InitStorage; + +#[async_trait] +impl CommandLineHandler for InitStorage { + type Arguments = InitStorageArgs; + + async fn handle(_global: &crate::GlobalArguments, args: &Self::Arguments) -> Result<()> { + let node_cfg = NodeInitConfig::load(&args.node_config).with_context(|| { + format!( + "failed to read node config from {}", + args.node_config.display() + ) + })?; + + let node_home = node_cfg.home.clone(); + let storage_dir = node_home.join("storage"); + + tokio::fs::create_dir_all(&storage_dir) + .await + .with_context(|| { + format!( + "failed to create storage directory at {}", + storage_dir.display() + ) + })?; + + let out = args + .out + .clone() + .unwrap_or_else(default_storage_provider_config_path); + + let root = 
workspace_root(); + let secret_key_file = args + .secret_key_file + .clone() + .unwrap_or_else(|| storage_dir.join("operator.sk")); + ensure_operator_secret_key(&secret_key_file)?; + + // Storage tooling defaults to testnet addressing unless explicitly overridden at runtime. + set_current_network(Network::Testnet); + let operator_sk = read_operator_secret_key(&secret_key_file).with_context(|| { + format!( + "failed to read operator key from {}", + secret_key_file.display() + ) + })?; + let operator_pk = operator_sk.public_key(); + let operator_f1 = Address::new_secp256k1(&operator_pk.serialize()) + .context("failed to derive operator f1 address")?; + let operator_f410_eth = EthAddress::new_secp256k1(&operator_pk.serialize()) + .context("failed to derive operator delegated address")?; + let operator_f410 = Address::new_delegated(10, &operator_f410_eth.0) + .context("failed to construct operator f410 address")?; + let storage_cfg = StorageConfig { + node_home: node_home.clone(), + node_config: args.node_config.clone(), + storage_node_bin: root.join("target/release/node"), + storage_gateway_bin: root.join("target/release/gateway"), + network: "testnet".to_string(), + tendermint_rpc_url: "http://127.0.0.1:26657".to_string(), + eth_rpc_url: "http://127.0.0.1:8545".to_string(), + secret_key_file, + bls_key_file: storage_dir.join("bls_key.hex"), + operator_rpc_url: "http://127.0.0.1:8081".to_string(), + run_mode: StorageRunMode::Both, + node_rpc_bind_addr: "127.0.0.1:8081".to_string(), + iroh_node_path: storage_dir.join("iroh-node"), + iroh_node_v4_addr: Some("0.0.0.0:11204".to_string()), + node_batch_size: 10, + node_poll_interval_secs: 5, + node_max_concurrent_downloads: 10, + objects_listen_addr: "127.0.0.1:8080".to_string(), + iroh_gateway_path: storage_dir.join("iroh-gateway"), + iroh_gateway_v4_addr: Some("0.0.0.0:11205".to_string()), + }; + + storage_cfg + .save(&out) + .with_context(|| format!("failed to write storage config to {}", out.display()))?; + + 
log::info!("Storage configuration generated at: {}", out.display()); + log::info!( + "Optional: register operator with `ipc-cli storage run --config {} --register-operator`", + out.display() + ); + log::info!( + "Using operator secret key file: {}", + storage_cfg.secret_key_file.display() + ); + log::info!("Operator funding addresses:"); + log::info!(" - delegated (fund this): {}", operator_f410); + log::info!(" - native (diagnostic only): {}", operator_f1); + log::info!( + "Recommendation: cross-fund the delegated operator address above before `storage run --register-operator`." + ); + log::info!("Example:"); + log::info!( + " ipc-cli cross-msg fund --subnet \"{}\" --from --to {} 1", + node_cfg.subnet, + operator_f410 + ); + log::info!( + "Run storage services with `ipc-cli storage run --config {}`", + out.display() + ); + Ok(()) + } +} + +#[derive(Debug, Args)] +#[command(name = "init", about = "Generate storage-node config from node config")] +pub struct InitStorageArgs { + #[arg(long, help = "Path to node init YAML config (node_*.yaml)")] + pub node_config: PathBuf, + #[arg(long, help = "Output path for generated storage YAML config")] + pub out: Option, + #[arg( + long, + help = "Path to operator secp256k1 secret key file (base64). 
Defaults to /storage/operator.sk" + )] + pub secret_key_file: Option, +} + +fn workspace_root() -> PathBuf { + Path::new(env!("CARGO_MANIFEST_DIR")) + .parent() + .and_then(|p| p.parent()) + .unwrap_or_else(|| Path::new(".")) + .to_path_buf() +} + +fn ensure_operator_secret_key(path: &Path) -> Result<()> { + if path.exists() { + return Ok(()); + } + + if let Some(parent) = path.parent() { + fs::create_dir_all(parent).with_context(|| { + format!( + "failed to create operator key directory at {}", + parent.display() + ) + })?; + } + + let mut rng = thread_rng(); + let sk = SecretKey::random(&mut rng); + let sk_bytes = sk.serialize(); + let sk_b64 = to_b64(sk_bytes.as_ref()); + fs::write(path, sk_b64) + .with_context(|| format!("failed to write operator key to {}", path.display()))?; + log::info!("Generated dedicated operator key at {}", path.display()); + Ok(()) +} + +fn read_operator_secret_key(path: &Path) -> Result { + let sk = fendermint_rpc::message::SignedMessageFactory::read_secret_key(path) + .with_context(|| format!("failed to parse secret key at {}", path.display()))?; + Ok(sk) +} diff --git a/ipc/cli/src/commands/storage/node/mod.rs b/ipc/cli/src/commands/storage/node/mod.rs new file mode 100644 index 0000000000..662b7edb68 --- /dev/null +++ b/ipc/cli/src/commands/storage/node/mod.rs @@ -0,0 +1,35 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +use clap::{Args, Subcommand}; + +pub mod init; +pub mod run; + +use self::init::{InitStorage, InitStorageArgs}; +use self::run::{RunStorage, RunStorageArgs}; +use crate::{CommandLineHandler, GlobalArguments}; + +#[derive(Debug, Args)] +#[command(name = "node", about = "Storage provider node commands")] +pub struct StorageNodeCommandArgs { + #[command(subcommand)] + command: StorageNodeCommands, +} + +#[derive(Debug, Subcommand)] +pub enum StorageNodeCommands { + /// Initialize storage provider config + Init(InitStorageArgs), + /// Run storage node and/or gateway + Run(RunStorageArgs), +} 
 + +impl StorageNodeCommandArgs { + pub async fn handle(&self, global: &GlobalArguments) -> anyhow::Result<()> { + match &self.command { + StorageNodeCommands::Init(args) => InitStorage::handle(global, args).await, + StorageNodeCommands::Run(args) => RunStorage::handle(global, args).await, + } + } +} diff --git a/ipc/cli/src/commands/storage/node/run.rs b/ipc/cli/src/commands/storage/node/run.rs new file mode 100644 index 0000000000..863776a7a0 --- /dev/null +++ b/ipc/cli/src/commands/storage/node/run.rs @@ -0,0 +1,319 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +use crate::commands::storage::config::{StorageConfig, StorageRunMode}; +use crate::CommandLineHandler; +use anyhow::{bail, Context, Result}; +use async_trait::async_trait; +use clap::Args; +use std::path::{Path, PathBuf}; +use std::process::{Child, Command, Stdio}; +use std::str::FromStr; + +pub(crate) struct RunStorage; + +#[async_trait] +impl CommandLineHandler for RunStorage { + type Arguments = RunStorageArgs; + + async fn handle(_global: &crate::GlobalArguments, args: &Self::Arguments) -> Result<()> { + let cfg = StorageConfig::load(&args.config).with_context(|| { + format!( + "failed to load storage config from {}", + args.config.display() + ) + })?; + + preflight(&cfg).await?; + + if args.register_operator { + run_register_operator(&cfg)?; + } + + let mode = args.mode.unwrap_or(cfg.run_mode); + match mode { + StorageRunMode::Node => run_node(&cfg), + StorageRunMode::Gateway => run_gateway(&cfg), + StorageRunMode::Both => run_both(&cfg), + } + } +} + +#[derive(Debug, Args)] +#[command(name = "run", about = "Run storage node and gateway")] +pub struct RunStorageArgs { + #[arg(long, help = "Path to storage YAML config")] + pub config: std::path::PathBuf, + #[arg(long, help = "Register node operator before launching services")] + pub register_operator: bool, + #[arg(long, help = "Override run mode (node|gateway|both)")] + pub mode: Option<StorageRunMode>, +} + +fn 
run_register_operator(cfg: &StorageConfig) -> Result<()> { + log::info!("Registering storage node operator"); + let status = Command::new(&cfg.storage_node_bin) + .arg("register-operator") + .arg("--bls-key-file") + .arg(&cfg.bls_key_file) + .arg("--secret-key-file") + .arg(&cfg.secret_key_file) + .arg("--operator-rpc-url") + .arg(&cfg.operator_rpc_url) + .arg("--chain-rpc-url") + .arg(&cfg.tendermint_rpc_url) + .env("FM_NETWORK", &cfg.network) + .status() + .with_context(|| { + format!( + "failed to execute register-operator using {}", + cfg.storage_node_bin.display() + ) + })?; + + if !status.success() { + bail!("register-operator exited with status {}", status); + } + Ok(()) +} + +fn run_node(cfg: &StorageConfig) -> Result<()> { + log::info!("Starting storage node"); + let status = node_command(cfg)? + .status() + .context("failed to start storage node process")?; + if !status.success() { + bail!("storage node exited with status {}", status); + } + Ok(()) +} + +fn run_gateway(cfg: &StorageConfig) -> Result<()> { + log::info!("Starting storage gateway"); + let status = gateway_command(cfg)? + .status() + .context("failed to start storage gateway process")?; + if !status.success() { + bail!("storage gateway exited with status {}", status); + } + Ok(()) +} + +fn run_both(cfg: &StorageConfig) -> Result<()> { + log::info!("Starting storage gateway + storage node"); + let mut gateway = gateway_command(cfg)? + .spawn() + .context("failed to spawn storage gateway")?; + let mut node = node_command(cfg)? 
+ .spawn() + .context("failed to spawn storage node")?; + + let node_status = node.wait().context("failed to wait for storage node")?; + if let Err(e) = terminate_child(&mut gateway) { + log::warn!("failed to stop gateway after node exit: {}", e); + } + + if !node_status.success() { + bail!("storage node exited with status {}", node_status); + } + Ok(()) +} + +fn terminate_child(child: &mut Child) -> Result<()> { + if child.try_wait()?.is_none() { + child.kill()?; + } + let _ = child.wait(); + Ok(()) +} + +fn node_command(cfg: &StorageConfig) -> Result { + let node_bin = resolve_bin_path(&cfg.storage_node_bin, "node")?; + let mut cmd = Command::new(&node_bin); + cmd.arg("run") + .arg("--secret-key-file") + .arg(&cfg.bls_key_file) + .arg("--iroh-path") + .arg(&cfg.iroh_node_path) + .arg("--rpc-url") + .arg(&cfg.tendermint_rpc_url) + .arg("--eth-rpc-url") + .arg(&cfg.eth_rpc_url) + .arg("--batch-size") + .arg(cfg.node_batch_size.to_string()) + .arg("--poll-interval-secs") + .arg(cfg.node_poll_interval_secs.to_string()) + .arg("--max-concurrent-downloads") + .arg(cfg.node_max_concurrent_downloads.to_string()) + .arg("--rpc-bind-addr") + .arg(&cfg.node_rpc_bind_addr) + .env("FM_NETWORK", &cfg.network) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()); + + if let Some(v4) = &cfg.iroh_node_v4_addr { + cmd.arg("--iroh-v4-addr").arg(v4); + } + Ok(cmd) +} + +fn gateway_command(cfg: &StorageConfig) -> Result { + let gateway_bin = resolve_bin_path(&cfg.storage_gateway_bin, "gateway")?; + let mut cmd = Command::new(&gateway_bin); + cmd.arg("--secret-key-file") + .arg(&cfg.secret_key_file) + .arg("--bls-key-file") + .arg(&cfg.bls_key_file) + .arg("--rpc-url") + .arg(&cfg.tendermint_rpc_url) + .arg("--objects-listen-addr") + .arg(&cfg.objects_listen_addr) + .arg("--iroh-path") + .arg(&cfg.iroh_gateway_path) + .env("FM_NETWORK", &cfg.network) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()); + + if let Some(v4) = &cfg.iroh_gateway_v4_addr { + 
cmd.arg("--iroh-v4-addr").arg(v4); + } + Ok(cmd) +} + +async fn preflight(cfg: &StorageConfig) -> Result<()> { + if resolve_bin_path(&cfg.storage_node_bin, "node").is_err() { + bail!( + "storage node binary not found at {} (build: cargo build --release -p ipc-decentralized-storage --bin node --bin gateway)", + cfg.storage_node_bin.display() + ); + } + if resolve_bin_path(&cfg.storage_gateway_bin, "gateway").is_err() { + bail!( + "storage gateway binary not found at {}", + cfg.storage_gateway_bin.display() + ); + } + if !cfg.secret_key_file.exists() { + bail!( + "operator secret key file not found: {}", + cfg.secret_key_file.display() + ); + } + if !cfg.bls_key_file.exists() { + if let Some(parent) = cfg.bls_key_file.parent() { + tokio::fs::create_dir_all(parent).await.with_context(|| { + format!( + "failed to create BLS key directory at {}", + parent.display() + ) + })?; + } + let node_bin = resolve_bin_path(&cfg.storage_node_bin, "node")?; + log::info!( + "BLS key file not found at {}; generating one now", + cfg.bls_key_file.display() + ); + let status = Command::new(&node_bin) + .arg("generate-bls-key") + .arg("--output") + .arg(&cfg.bls_key_file) + .status() + .with_context(|| { + format!( + "failed to generate BLS key via {}", + node_bin.display() + ) + })?; + if !status.success() { + bail!( + "failed to generate BLS key at {} (exit status {})", + cfg.bls_key_file.display(), + status + ); + } + log::info!("Generated BLS key at {}", cfg.bls_key_file.display()); + } + if cfg + .secret_key_file + .ends_with(std::path::Path::new("fendermint/validator.sk")) + { + log::warn!( + "Storage config uses validator key ({}). 
If register-operator fails with sender/account errors, regenerate config with --secret-key-file pointing to a dedicated funded operator key.", + cfg.secret_key_file.display() + ); + } + + if !cfg.node_home.exists() { + bail!( + "node home directory does not exist: {}", + cfg.node_home.display() + ); + } + + if !cfg.iroh_node_path.exists() { + tokio::fs::create_dir_all(&cfg.iroh_node_path) + .await + .with_context(|| { + format!( + "failed to create node iroh directory at {}", + cfg.iroh_node_path.display() + ) + })?; + } + if !cfg.iroh_gateway_path.exists() { + tokio::fs::create_dir_all(&cfg.iroh_gateway_path) + .await + .with_context(|| { + format!( + "failed to create gateway iroh directory at {}", + cfg.iroh_gateway_path.display() + ) + })?; + } + Ok(()) +} + +fn resolve_bin_path(configured: &Path, bin_name: &str) -> Result { + if configured.exists() { + return Ok(configured.to_path_buf()); + } + + let fallback = workspace_root().join("target/release").join(bin_name); + if fallback.exists() { + log::warn!( + "Configured {} binary not found at {}; using fallback {}", + bin_name, + configured.display(), + fallback.display() + ); + return Ok(fallback); + } + + bail!( + "{} binary not found at {} or fallback {}", + bin_name, + configured.display(), + fallback.display() + ) +} + +fn workspace_root() -> PathBuf { + Path::new(env!("CARGO_MANIFEST_DIR")) + .parent() + .and_then(|p| p.parent()) + .unwrap_or_else(|| Path::new(".")) + .to_path_buf() +} + +impl FromStr for StorageRunMode { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + match s.to_ascii_lowercase().as_str() { + "node" => Ok(Self::Node), + "gateway" => Ok(Self::Gateway), + "both" => Ok(Self::Both), + _ => bail!("invalid run mode '{}', expected node|gateway|both", s), + } + } +} diff --git a/ipc/cli/src/commands/storage/shared/bucket.rs b/ipc/cli/src/commands/storage/shared/bucket.rs new file mode 100644 index 0000000000..a94f116588 --- /dev/null +++ 
b/ipc/cli/src/commands/storage/shared/bucket.rs
@@ -0,0 +1,459 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: MIT
+
+//! Bucket operations for on-chain storage management
+//!
+//! This module provides functions to interact with bucket smart contracts.
+
+use anyhow::{anyhow, Context, Result};
+use ethers::abi::{encode as abi_encode, Token};
+use fendermint_actor_blobs_shared::bytes::B256;
+use fendermint_actor_bucket::{
+    AddParams, GetParams, ListObjectsReturn, ListParams, Method as BucketMethod, Object,
+};
+use fendermint_rpc::{
+    message::GasParams,
+    tx::{BoundClient, TxClient, TxCommit},
+    QueryClient,
+};
+use fendermint_vm_message::query::FvmQueryHeight;
+use fvm_ipld_encoding::{BytesSer, RawBytes};
+use fendermint_vm_actor_interface::evm;
+use fvm_shared::{address::Address, chainid::ChainID, econ::TokenAmount, message::Message};
+use num_traits::Zero;
+use std::collections::HashMap;
+
+// Generous fixed limit for read-only (query) calls; these are simulated, not paid for.
+const READ_GAS_LIMIT: u64 = 10_000_000_000;
+// Flat premium (in atto) added on top of the queried base fee for writes.
+const DEFAULT_GAS_PREMIUM_ATTO: u64 = 100_000;
+/// Fallback gas limit when estimate_gas returns 0 (e.g. node rejects sequence=0 simulation).
+/// Must cover the most expensive write path (deleteObject calls into the blobs actor).
+const DEFAULT_TX_GAS_LIMIT: u64 = 500_000_000;
+/// Headroom multiplier applied on top of the estimated gas limit to absorb estimation drift.
+const GAS_LIMIT_BUFFER_NUM: u64 = 5;
+const GAS_LIMIT_BUFFER_DEN: u64 = 4; // × 1.25
+
+/// Estimate dynamic gas parameters for a transaction.
+pub async fn tx_gas_params( + client: &C, + from: Address, + to: Address, + method_num: u64, + params: RawBytes, + value: TokenAmount, +) -> Result +where + C: QueryClient + Send + Sync, +{ + let state_params = client + .state_params(FvmQueryHeight::default()) + .await + .context("failed to query state params for gas estimation")?; + let base_fee = state_params.value.base_fee; + + let estimate_msg = Message { + version: Default::default(), + from, + to, + sequence: 0, + value: value.clone(), + method_num, + params, + gas_limit: 0, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + let gas_estimate = client + .estimate_gas(estimate_msg, FvmQueryHeight::default()) + .await + .context("failed to estimate gas")?; + + let gas_premium = TokenAmount::from_atto(DEFAULT_GAS_PREMIUM_ATTO); + let gas_fee_cap = base_fee + gas_premium.clone(); + + let gas_limit = if gas_estimate.value.gas_limit == 0 { + // Some RPC configurations return 0 from estimate_gas (e.g. when sequence=0 is + // rejected). Fall back to a generous limit that covers the most expensive write path. + DEFAULT_TX_GAS_LIMIT + } else { + // Apply a 25% headroom buffer to absorb estimation drift. + gas_estimate.value.gas_limit * GAS_LIMIT_BUFFER_NUM / GAS_LIMIT_BUFFER_DEN + }; + + Ok(GasParams { + gas_limit, + gas_fee_cap, + gas_premium, + }) +} + +/// Add an object to a bucket +/// +/// This registers an object's metadata on-chain after the blob has been uploaded +/// to the gateway and distributed to storage nodes. 
+pub async fn add_object( + client: &mut C, + bucket_address: Address, + source: B256, + key: String, + hash: B256, + recovery_hash: B256, + size: u64, + metadata: HashMap, + data_shards: u16, + parity_shards: u16, +) -> Result<()> +where + C: BoundClient + QueryClient + TxClient + Send + Sync, +{ + let params = AddParams { + source, + key: key.into_bytes(), + hash, + recovery_hash, + size, + ttl: None, // Use default TTL + metadata, + overwrite: false, + data_shards, + parity_shards, + }; + + let key = String::from_utf8(params.key.clone()).context("Invalid UTF-8 in object key")?; + let key_for_err = key.clone(); + let calldata = { + let mut bytes = Vec::with_capacity(4 + 32 * 7); + // addObject(bytes32,string,bytes32,bytes32,uint64,uint16,uint16) + bytes.extend_from_slice(&[0x95, 0x79, 0xba, 0xf9]); + bytes.extend_from_slice(&abi_encode(&[ + Token::FixedBytes(params.source.0.to_vec()), + Token::String(key), + Token::FixedBytes(params.hash.0.to_vec()), + Token::FixedBytes(params.recovery_hash.0.to_vec()), + Token::Uint(params.size.into()), + Token::Uint((params.data_shards as u64).into()), + Token::Uint((params.parity_shards as u64).into()), + ])); + bytes + }; + let invoke_params = RawBytes::serialize(BytesSer(&calldata)) + .context("Failed to serialize FEVM calldata for addObject")?; + + let sender = client.address(); + let gas_params = tx_gas_params( + client, + sender, + bucket_address, + evm::Method::InvokeContract as u64, + invoke_params, + TokenAmount::zero(), + ) + .await + .context("Failed to estimate AddObject gas parameters")?; + + let res = TxClient::::fevm_invoke( + client, + bucket_address, + calldata.into(), + TokenAmount::zero(), + gas_params, + ) + .await + .map_err(|e| { + anyhow!( + "Failed to send AddObject transaction: {} (sender={} bucket={} key={})", + e, + sender, + bucket_address, + key_for_err + ) + })?; + + if res.response.check_tx.code.is_err() { + let log = &res.response.check_tx.log; + let info = &res.response.check_tx.info; + return 
Err(anyhow!( + "AddObject check_tx failed (code {:?}): log={} info={} sender={} bucket={} key={}", + res.response.check_tx.code, + if log.is_empty() { "" } else { log }, + if info.is_empty() { "" } else { info }, + sender, + bucket_address, + key_for_err + )); + } + + if res.response.deliver_tx.code.is_err() { + let log = &res.response.deliver_tx.log; + let info = &res.response.deliver_tx.info; + return Err(anyhow!( + "AddObject deliver_tx failed (code {:?}): log={} info={} sender={} bucket={} key={}", + res.response.deliver_tx.code, + if log.is_empty() { "" } else { log }, + if info.is_empty() { "" } else { info }, + sender, + bucket_address, + key_for_err + )); + } + + Ok(()) +} + +/// Get an object from a bucket +pub async fn get_object( + client: &mut C, + bucket_address: Address, + key: String, +) -> Result> +where + C: QueryClient + Send + Sync, +{ + let params = GetParams(key.into_bytes()); + let params_bytes = RawBytes::serialize(params).context("Failed to serialize GetParams")?; + + let msg = Message { + version: Default::default(), + from: fendermint_vm_actor_interface::system::SYSTEM_ACTOR_ADDR, + to: bucket_address, + sequence: 0, + value: TokenAmount::zero(), + method_num: BucketMethod::GetObject as u64, + params: params_bytes, + gas_limit: READ_GAS_LIMIT, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + + let response = client + .call(msg, FvmQueryHeight::default()) + .await + .context("Failed to execute GetObject call")?; + + if response.value.code.is_err() { + return Err(anyhow!("GetObject query failed: {}", response.value.info)); + } + + let return_data = fendermint_rpc::response::decode_data(&response.value.data) + .context("Failed to decode response data")?; + + let result = fvm_ipld_encoding::from_slice::>(&return_data) + .context("Failed to decode GetObject response")?; + + Ok(result) +} + +/// List objects in a bucket +pub async fn list_objects( + client: &C, + bucket_address: Address, + prefix: Option, + 
delimiter: Option, + start_key: Option, + limit: u64, +) -> Result +where + C: QueryClient + Send + Sync, +{ + let params = ListParams { + prefix: prefix.unwrap_or_default().into_bytes(), + delimiter: delimiter.unwrap_or_default().into_bytes(), + start_key: start_key.map(|s| s.into_bytes()), + limit, + }; + + let params_bytes = RawBytes::serialize(params).context("Failed to serialize ListParams")?; + + let msg = fvm_shared::message::Message { + version: Default::default(), + from: fendermint_vm_actor_interface::system::SYSTEM_ACTOR_ADDR, + to: bucket_address, + sequence: 0, + value: TokenAmount::zero(), + method_num: BucketMethod::ListObjects as u64, + params: params_bytes, + gas_limit: READ_GAS_LIMIT, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + + let response = client + .call(msg, FvmQueryHeight::default()) + .await + .context("Failed to execute ListObjects call")?; + + if response.value.code.is_err() { + return Err(anyhow!("ListObjects query failed: {}", response.value.info)); + } + + let return_data = fendermint_rpc::response::decode_data(&response.value.data) + .context("Failed to decode response data")?; + + let result = fvm_ipld_encoding::from_slice::(&return_data) + .context("Failed to decode ListObjects response")?; + + Ok(result) +} + +/// Delete an object from a bucket +pub async fn delete_object(client: &mut C, bucket_address: Address, key: String) -> Result<()> +where + C: BoundClient + QueryClient + TxClient + Send + Sync, +{ + let key_for_err = key.clone(); + let calldata = { + let mut bytes = Vec::with_capacity(4 + 64); + // deleteObject(string) + bytes.extend_from_slice(&[0x2d, 0x7c, 0xb6, 0x00]); + bytes.extend_from_slice(&abi_encode(&[Token::String(key)])); + bytes + }; + let invoke_params = RawBytes::serialize(BytesSer(&calldata)) + .context("Failed to serialize FEVM calldata for deleteObject")?; + + let sender = client.address(); + let gas_params = tx_gas_params( + client, + sender, + bucket_address, + 
evm::Method::InvokeContract as u64, + invoke_params, + TokenAmount::zero(), + ) + .await + .context("Failed to estimate DeleteObject gas parameters")?; + + let res = TxClient::::fevm_invoke( + client, + bucket_address, + calldata.into(), + TokenAmount::zero(), + gas_params, + ) + .await + .map_err(|e| { + anyhow!( + "Failed to send DeleteObject transaction: {} (sender={} bucket={} key={})", + e, + sender, + bucket_address, + key_for_err + ) + })?; + + if res.response.check_tx.code.is_err() { + let log = &res.response.check_tx.log; + let info = &res.response.check_tx.info; + return Err(anyhow!( + "DeleteObject check_tx failed (code {:?}): log={} info={} sender={} bucket={} key={}", + res.response.check_tx.code, + if log.is_empty() { "" } else { log }, + if info.is_empty() { "" } else { info }, + sender, + bucket_address, + key_for_err + )); + } + + if res.response.deliver_tx.code.is_err() { + let log = &res.response.deliver_tx.log; + let info = &res.response.deliver_tx.info; + return Err(anyhow!( + "DeleteObject deliver_tx failed (code {:?}): log={} info={} sender={} bucket={} key={}", + res.response.deliver_tx.code, + if log.is_empty() { "" } else { log }, + if info.is_empty() { "" } else { info }, + sender, + bucket_address, + key_for_err + )); + } + + Ok(()) +} + +/// Query the chain ID from the network +pub async fn query_chain_id(client: &C) -> Result +where + C: QueryClient + Send + Sync, +{ + let state_params = client + .state_params(FvmQueryHeight::default()) + .await + .context("Failed to query state params for chain ID")?; + + Ok(ChainID::from(state_params.value.chain_id)) +} + +/// Convert a hex string to a B256, with length validation. +/// +/// Accepts with or without "0x" prefix. Returns an error if the decoded +/// bytes are not exactly 32 bytes long. 
+pub fn hex_to_b256(hex_str: &str) -> Result { + let hex_str = hex_str.strip_prefix("0x").unwrap_or(hex_str); + let bytes = hex::decode(hex_str).context("Invalid hex string")?; + if bytes.len() != 32 { + return Err(anyhow!( + "Expected 32 bytes, got {} bytes from hex string", + bytes.len() + )); + } + let mut array = [0u8; 32]; + array.copy_from_slice(&bytes); + Ok(B256(array)) +} + +/// Convert a hash string to B256, auto-detecting hex or base32 encoding. +/// +/// Supports: +/// - Hex with "0x" prefix +/// - Hex (64 hex chars) +/// - Base32 lower-case no-padding (iroh/blake3 format, 52 chars) +pub fn hash_to_b256(s: &str) -> Result { + if s.starts_with("0x") || (s.len() == 64 && s.chars().all(|c| c.is_ascii_hexdigit())) { + return hex_to_b256(s); + } + // Try base32 (lower-case no-padding, as used by iroh) + let bytes = base32_decode_nopad(s).context("Failed to decode as base32")?; + if bytes.len() < 32 { + return Err(anyhow!( + "Expected at least 32 bytes, got {} from base32 string", + bytes.len() + )); + } + let mut array = [0u8; 32]; + array.copy_from_slice(&bytes[..32]); + Ok(B256(array)) +} + +/// Decode RFC 4648 base32 (case-insensitive, no padding required). +fn base32_decode_nopad(input: &str) -> Result> { + const ALPHABET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"; + + fn val(c: u8) -> Result { + let c = c.to_ascii_uppercase(); + ALPHABET + .iter() + .position(|&a| a == c) + .map(|p| p as u8) + .ok_or_else(|| anyhow!("invalid base32 character: {}", c as char)) + } + + let input = input.as_bytes(); + let mut buf = Vec::with_capacity(input.len() * 5 / 8); + let mut bits: u32 = 0; + let mut n_bits: u32 = 0; + + for &c in input { + if c == b'=' { + break; + } + bits = (bits << 5) | val(c)? 
as u32; + n_bits += 5; + if n_bits >= 8 { + n_bits -= 8; + buf.push((bits >> n_bits) as u8); + bits &= (1 << n_bits) - 1; + } + } + Ok(buf) +} diff --git a/ipc/cli/src/commands/storage/shared/client_context.rs b/ipc/cli/src/commands/storage/shared/client_context.rs new file mode 100644 index 0000000000..3b826416f7 --- /dev/null +++ b/ipc/cli/src/commands/storage/shared/client_context.rs @@ -0,0 +1,162 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +use anyhow::{anyhow, Context, Result}; +use fendermint_crypto::SecretKey; +use fvm_shared::address::Address; +use ipc_wallet::EvmKeyStore; +use std::path::PathBuf; +use std::str::FromStr; + +use crate::commands::storage::config::{ + resolve_client_config_path, resolve_provider_config_path, StorageClientConfig, StorageConfig, +}; +use crate::GlobalArguments; + +pub struct WriteContext { + pub rpc_url: String, + pub secret_key: SecretKey, +} + +pub fn resolve_rpc_url(config: Option) -> Result { + let client_config_path = resolve_client_config_path(config.clone()); + let provider_config_path = resolve_provider_config_path(config); + + if client_config_path.exists() { + let client_cfg = StorageClientConfig::load(&client_config_path).with_context(|| { + format!( + "failed to load client storage config at {}", + client_config_path.display() + ) + })?; + if client_cfg.tendermint_rpc_url.trim().is_empty() { + return Err(anyhow!( + "client storage config at {} has empty tendermint-rpc-url", + client_config_path.display() + )); + } + Ok(client_cfg.tendermint_rpc_url) + } else if provider_config_path.exists() { + Ok(StorageConfig::load(&provider_config_path) + .with_context(|| { + format!( + "failed to load provider storage config at {}", + provider_config_path.display() + ) + })? + .tendermint_rpc_url) + } else { + Err(anyhow!( + "No storage config found. Expected one of:\n\ + - client config: {}\n\ + - provider config: {}\n\ + Initialize client mode with 'ipc-cli storage client init ...' 
or pass --config.", + client_config_path.display(), + provider_config_path.display() + )) + } +} + +pub fn resolve_default_owner_from_client_config(config: Option) -> Result> { + let client_config_path = resolve_client_config_path(config); + if !client_config_path.exists() { + return Ok(None); + } + let client_cfg = StorageClientConfig::load(&client_config_path).with_context(|| { + format!( + "failed to load client storage config at {}", + client_config_path.display() + ) + })?; + if let Some(addr) = client_cfg.address { + return Ok(Some(crate::require_fil_addr_from_str(&addr)?)); + } + Ok(None) +} + +pub fn resolve_write_context(global: &GlobalArguments, config: Option) -> Result { + let client_config_path = resolve_client_config_path(config.clone()); + let provider_config_path = resolve_provider_config_path(config); + + if client_config_path.exists() { + let client_cfg = StorageClientConfig::load(&client_config_path).with_context(|| { + format!( + "failed to load client storage config at {}", + client_config_path.display() + ) + })?; + if client_cfg.tendermint_rpc_url.trim().is_empty() { + return Err(anyhow!( + "client storage config at {} has empty tendermint-rpc-url", + client_config_path.display() + )); + } + + let provider = crate::commands::get_ipc_provider(global) + .context("failed to load IPC provider config to resolve client-mode signer key")?; + let keystore = provider + .evm_wallet() + .context("failed to access EVM wallet for client-mode signer")?; + let mut keystore = keystore.write().unwrap(); + + let configured_evm = client_cfg + .address + .as_ref() + .and_then(|s| ethers::types::Address::from_str(s).ok()) + .map(Into::into); + let signer_evm = if let Some(addr) = configured_evm { + Some(addr) + } else { + keystore + .get_default() + .context("failed to get default EVM wallet address")? 
+ } + .ok_or_else(|| { + anyhow!( + "no signer key available in client mode: set `address` in storage client config \ + to an EVM address present in your wallet, or set a default with \ + 'ipc-cli wallet set-default --wallet-type evm --address <0x...>'" + ) + })?; + let key_info = keystore + .get(&signer_evm) + .context("failed to load EVM wallet key for client-mode signer")? + .ok_or_else(|| anyhow!("configured/default EVM wallet key {} not found", signer_evm))?; + let secret_key = SecretKey::try_from(key_info.private_key().to_vec()) + .context("configured/default EVM key is not a valid secp256k1 key")?; + + Ok(WriteContext { + rpc_url: client_cfg.tendermint_rpc_url, + secret_key, + }) + } else if provider_config_path.exists() { + let cfg = StorageConfig::load(&provider_config_path).with_context(|| { + format!( + "failed to load provider storage config at {}", + provider_config_path.display() + ) + })?; + let secret_key = fendermint_rpc::message::SignedMessageFactory::read_secret_key( + &cfg.secret_key_file, + ) + .with_context(|| { + format!( + "failed to read provider secret key from {}", + cfg.secret_key_file.display() + ) + })?; + Ok(WriteContext { + rpc_url: cfg.tendermint_rpc_url, + secret_key, + }) + } else { + Err(anyhow!( + "No storage config found. Expected one of:\n\ + - client config: {}\n\ + - provider config: {}\n\ + Initialize client mode with 'ipc-cli storage client init ...' 
or pass --config.", + client_config_path.display(), + provider_config_path.display() + )) + } +} diff --git a/ipc/cli/src/commands/storage/shared/config.rs b/ipc/cli/src/commands/storage/shared/config.rs new file mode 100644 index 0000000000..1461d96eed --- /dev/null +++ b/ipc/cli/src/commands/storage/shared/config.rs @@ -0,0 +1,244 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +use anyhow::Result; +use fs_err as fs; +use serde::{Deserialize, Serialize}; +use std::path::{Path, PathBuf}; + +fn ipc_config_dir() -> PathBuf { + dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(".ipc") +} + +pub fn legacy_storage_config_path() -> PathBuf { + ipc_config_dir().join("storage.yaml") +} + +pub fn default_storage_provider_config_path() -> PathBuf { + ipc_config_dir() + .join("storage") + .join("node") + .join("config.yaml") +} + +pub fn default_storage_client_config_path() -> PathBuf { + ipc_config_dir() + .join("storage") + .join("client") + .join("config.yaml") +} + +fn old_storage_provider_config_path() -> PathBuf { + ipc_config_dir().join("storage-provider.yaml") +} + +fn old_storage_client_config_path() -> PathBuf { + ipc_config_dir().join("storage-client.yaml") +} + +/// Resolve provider config path with fallback to legacy storage.yaml. +pub fn resolve_provider_config_path(explicit: Option) -> PathBuf { + if let Some(path) = explicit { + return path; + } + let provider = default_storage_provider_config_path(); + if provider.exists() { + return provider; + } + let old_provider = old_storage_provider_config_path(); + if old_provider.exists() { + return old_provider; + } + let legacy = legacy_storage_config_path(); + if legacy.exists() { + return legacy; + } + provider +} + +/// Resolve client config path with fallback to legacy storage.yaml. 
+pub fn resolve_client_config_path(explicit: Option<PathBuf>) -> PathBuf {
+    if let Some(path) = explicit {
+        return path;
+    }
+    let client = default_storage_client_config_path();
+    if client.exists() {
+        return client;
+    }
+    let old_client = old_storage_client_config_path();
+    if old_client.exists() {
+        return old_client;
+    }
+    let legacy = legacy_storage_config_path();
+    if legacy.exists() {
+        return legacy;
+    }
+    // Nothing exists yet: return the canonical (new-style) path for callers to create.
+    client
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+pub struct StorageClientConfig {
+    /// Tendermint RPC endpoint used for read-only chain queries.
+    pub tendermint_rpc_url: String,
+    /// Gateway URL for object download/read operations.
+    pub gateway_url: Option<String>,
+    /// Optional default account address for user-oriented queries.
+    pub address: Option<String>,
+}
+
+impl StorageClientConfig {
+    /// Load the client config from a YAML file.
+    pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
+        let path = path.as_ref();
+        let contents = fs::read_to_string(path)?;
+        let cfg: StorageClientConfig = serde_yaml::from_str(&contents)
+            .map_err(|e| anyhow::anyhow!("Failed to parse {}: {}", path.display(), e))?;
+        Ok(cfg)
+    }
+
+    /// Save the client config as YAML, creating parent directories as needed.
+    pub fn save<P: AsRef<Path>>(&self, path: P) -> Result<()> {
+        let path = path.as_ref();
+        if let Some(parent) = path.parent() {
+            fs::create_dir_all(parent)?;
+        }
+        let contents = serde_yaml::to_string(self)?;
+        fs::write(path, contents)?;
+        Ok(())
+    }
+
+    /// Default config pointing at a local Tendermint RPC.
+    pub fn default_with_local_rpc() -> Self {
+        Self {
+            tendermint_rpc_url: "http://127.0.0.1:26657".to_string(),
+            gateway_url: None,
+            address: None,
+        }
+    }
+}
+
+pub fn resolve_client_gateway_url(
+    explicit_gateway: Option<&str>,
+    explicit_config: Option<PathBuf>,
+    interactive: bool,
+) -> Result<String> {
+    // Precedence: explicit flag > environment variable > config file > interactive prompt.
+    if let Some(url) = explicit_gateway {
+        return Ok(url.to_string());
+    }
+
+    if let Ok(url) = std::env::var("IPC_STORAGE_GATEWAY") {
+        if !url.is_empty() {
+            return Ok(url);
+        }
+    }
+
+    let config_path = resolve_client_config_path(explicit_config);
+    if config_path.exists() {
+        let mut cfg = if let Ok(client_cfg) =
StorageClientConfig::load(&config_path) { + client_cfg + } else { + StorageClientConfig::default_with_local_rpc() + }; + if let Some(url) = &cfg.gateway_url { + if !url.is_empty() { + return Ok(url.clone()); + } + } + if interactive { + println!("Gateway URL not configured."); + println!("Please enter the storage gateway URL (e.g., http://localhost:8080):"); + let mut input = String::new(); + std::io::stdin().read_line(&mut input)?; + let url = input.trim().to_string(); + if url.is_empty() { + anyhow::bail!("Gateway URL cannot be empty"); + } + cfg.gateway_url = Some(url.clone()); + cfg.save(&config_path)?; + println!("Gateway URL saved to {}", config_path.display()); + return Ok(url); + } + } + + anyhow::bail!( + "Gateway URL not configured. Set via:\n\ + 1. --gateway flag\n\ + 2. IPC_STORAGE_GATEWAY environment variable\n\ + 3. gateway_url in storage client config (`ipc-cli storage client init` / `... set`)" + ) +} + +/// Which storage components to run. +#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)] +#[serde(rename_all = "kebab-case")] +pub enum StorageRunMode { + Node, + Gateway, + #[default] + Both, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +pub struct StorageConfig { + /// IPC node home, usually "~/.node-ipc". + pub node_home: PathBuf, + /// Source node-init config used to derive defaults. + pub node_config: PathBuf, + /// Path to ipc-storage node binary (ipc-decentralized-storage `node`). + pub storage_node_bin: PathBuf, + /// Path to ipc-storage gateway binary. + pub storage_gateway_bin: PathBuf, + + /// FM network passed to storage binaries (testnet/mainnet). + pub network: String, + + /// Tendermint RPC endpoint of the subnet node. + pub tendermint_rpc_url: String, + /// EVM JSON-RPC endpoint of the subnet node. + pub eth_rpc_url: String, + + /// Secp256k1 key for signing chain transactions. + pub secret_key_file: PathBuf, + /// BLS key used by storage node/operator. 
+ pub bls_key_file: PathBuf, + + /// Operator API URL published on-chain during registration. + pub operator_rpc_url: String, + + /// Run mode for `ipc-cli storage run`. + pub run_mode: StorageRunMode, + + /// Storage-node settings + pub node_rpc_bind_addr: String, + pub iroh_node_path: PathBuf, + pub iroh_node_v4_addr: Option, + pub node_batch_size: u32, + pub node_poll_interval_secs: u64, + pub node_max_concurrent_downloads: usize, + + /// Gateway settings + pub objects_listen_addr: String, + pub iroh_gateway_path: PathBuf, + pub iroh_gateway_v4_addr: Option, +} + +impl StorageConfig { + pub fn load>(path: P) -> Result { + let path = path.as_ref(); + let contents = fs::read_to_string(path)?; + let cfg: StorageConfig = serde_yaml::from_str(&contents) + .map_err(|e| anyhow::anyhow!("Failed to parse {}: {}", path.display(), e))?; + Ok(cfg) + } + + pub fn save>(&self, path: P) -> Result<()> { + let path = path.as_ref(); + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + let contents = serde_yaml::to_string(self)?; + fs::write(path, contents)?; + Ok(()) + } +} diff --git a/ipc/cli/src/commands/storage/shared/gateway.rs b/ipc/cli/src/commands/storage/shared/gateway.rs new file mode 100644 index 0000000000..b34ee76fb8 --- /dev/null +++ b/ipc/cli/src/commands/storage/shared/gateway.rs @@ -0,0 +1,221 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: MIT + +//! HTTP client for storage gateway API +//! +//! This module provides functions to interact with the storage gateway's HTTP API. 
+ +use anyhow::{anyhow, Context, Result}; +use fvm_shared::address::Address; +use reqwest::multipart::{Form, Part}; +use serde::{Deserialize, Serialize}; +use std::time::Duration; + +/// Response from uploading a blob to the gateway +#[derive(Debug, Serialize, Deserialize)] +pub struct UploadResponse { + /// Original blob content hash + pub hash: String, + /// Number of chunks the data was split into + pub num_chunks: usize, + /// Number of data shards per chunk (k) + pub data_shards: usize, + /// Number of parity shards per chunk (m) + pub parity_shards: usize, + /// Original data length in bytes + pub original_len: usize, +} + +/// Node address information from the gateway +#[derive(Debug, Serialize, Deserialize)] +pub struct NodeInfo { + pub node_id: String, + pub relay_url: Option, + pub direct_addresses: Vec, +} + +/// Storage gateway HTTP client +pub struct GatewayClient { + base_url: String, + client: reqwest::Client, +} + +impl GatewayClient { + /// Create a new gateway client + pub fn new(base_url: String) -> Result { + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(300)) // 5 minute timeout for large uploads + .build() + .context("Failed to create HTTP client")?; + + Ok(Self { base_url, client }) + } + + /// Upload a blob to the gateway + /// + /// This uploads the data to Iroh, erasure-encodes it, and distributes shards + /// to the assigned storage nodes. 
+ pub async fn upload_blob(&self, data: Vec) -> Result { + let size = data.len() as u64; + + // Build multipart form + let form = Form::new().text("size", size.to_string()).part( + "data", + Part::bytes(data) + .file_name("upload") + .mime_str("application/octet-stream")?, + ); + + let url = format!("{}/v1/objects", self.base_url); + + let response = self + .client + .post(&url) + .multipart(form) + .send() + .await + .context("Failed to send upload request")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + return Err(anyhow!("Upload failed with status {}: {}", status, body)); + } + + let upload_response: UploadResponse = response + .json() + .await + .context("Failed to parse upload response")?; + + Ok(upload_response) + } + + /// Download an object from a bucket by its key/path + /// + /// This queries the bucket contract for the object metadata, then retrieves + /// the blob data. + pub async fn download_object( + &self, + bucket_address: &Address, + key: &str, + height: Option, + ) -> Result> { + let bucket_str = bucket_address.to_string(); + let encoded_key = urlencoding::encode(key); + + let mut url = format!( + "{}/v1/objects/{}/{}", + self.base_url, bucket_str, encoded_key + ); + if let Some(h) = height { + url.push_str(&format!("?height={}", h)); + } + + let response = self + .client + .get(&url) + .send() + .await + .context("Failed to send download request")?; + + if response.status() == reqwest::StatusCode::NOT_FOUND { + return Err(anyhow!("Object not found: {}", key)); + } + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + return Err(anyhow!("Download failed with status {}: {}", status, body)); + } + + let data = response + .bytes() + .await + .context("Failed to read download response")?; + + Ok(data.to_vec()) + } + + /// Download a blob directly by its hex hash. 
+ /// + /// Calls `GET /v1/blobs/{hash}` on the gateway. The gateway looks up the blob in the + /// blobs actor, fetches erasure-coded shards from storage nodes, and returns the + /// reconstructed content. + /// + /// Note: this endpoint is different from the Node RPC's `/v1/blobs/{hash}/content` + /// which lives on a different port and is not exposed to CLI clients. + pub async fn download_blob(&self, blob_hash: &str, height: Option) -> Result> { + let mut url = format!("{}/v1/blobs/{}", self.base_url, blob_hash); + if let Some(h) = height { + url.push_str(&format!("?height={}", h)); + } + + let response = self + .client + .get(&url) + .send() + .await + .context("Failed to send blob download request")?; + + if response.status() == reqwest::StatusCode::NOT_FOUND { + return Err(anyhow!("Blob not found: {}", blob_hash)); + } + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + return Err(anyhow!( + "Blob download failed with status {}: {}", + status, + body + )); + } + + let data = response + .bytes() + .await + .context("Failed to read blob download response")?; + + Ok(data.to_vec()) + } + + /// Get the gateway node information + pub async fn get_node_info(&self) -> Result { + let url = format!("{}/v1/node", self.base_url); + + let response = self + .client + .get(&url) + .send() + .await + .context("Failed to send node info request")?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + return Err(anyhow!( + "Node info request failed with status {}: {}", + status, + body + )); + } + + let node_info: NodeInfo = response + .json() + .await + .context("Failed to parse node info response")?; + + Ok(node_info) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_client_creation() { + let client = GatewayClient::new("http://localhost:8080".to_string()); + assert!(client.is_ok()); + } +} 
diff --git a/ipc/cli/src/commands/storage/shared/mod.rs b/ipc/cli/src/commands/storage/shared/mod.rs
new file mode 100644
index 0000000000..ab795245d2
--- /dev/null
+++ b/ipc/cli/src/commands/storage/shared/mod.rs
@@ -0,0 +1,8 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: MIT
+
+pub mod bucket;
+pub mod client_context;
+pub mod config;
+pub mod gateway;
+pub mod path;
diff --git a/ipc/cli/src/commands/storage/shared/path.rs b/ipc/cli/src/commands/storage/shared/path.rs
new file mode 100644
index 0000000000..c38faf11a9
--- /dev/null
+++ b/ipc/cli/src/commands/storage/shared/path.rs
@@ -0,0 +1,143 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: MIT
+
+//! Path parsing for storage URIs
+//!
+//! This module handles parsing of ipc:// URIs for storage operations.
+//! Format: ipc://bucket_address/path/to/object
+//!
+//! Examples:
+//! - ipc://0x1234.../documents/file.txt
+//! - ipc://t1.../data/backup.tar.gz
+
+use anyhow::{anyhow, Context, Result};
+use fvm_shared::address::{Address, Error as NetworkError, Network};
+use std::str::FromStr;
+
+/// Represents a parsed storage path
+#[derive(Debug, Clone)]
+pub struct StoragePath {
+    /// The bucket contract address
+    pub bucket_address: Address,
+    /// The object key/path within the bucket
+    pub key: String,
+}
+
+impl StoragePath {
+    /// Parse a storage URI in the format ipc://bucket_address/path/to/object
+    pub fn parse(uri: &str) -> Result<Self> {
+        if !uri.starts_with("ipc://") {
+            return Err(anyhow!("Storage path must start with ipc://"));
+        }
+
+        let path_part = &uri[6..]; // Remove "ipc://"
+
+        // Find the first '/' to separate bucket address from key
+        let (bucket_str, key) = match path_part.find('/') {
+            Some(idx) => {
+                let bucket = &path_part[..idx];
+                let key = &path_part[idx + 1..]; // Skip the '/'
+                (bucket, key.to_string())
+            }
+            None => {
+                // No key provided, just bucket address
+                (path_part, String::new())
+            }
+        };
+
+        if bucket_str.is_empty() {
+            return
Err(anyhow!("Bucket address cannot be empty"));
+        }
+
+        let bucket_address = parse_address(bucket_str)
+            .with_context(|| format!("Invalid bucket address: {}", bucket_str))?;
+
+        Ok(StoragePath {
+            bucket_address,
+            key,
+        })
+    }
+
+    /// Check if this path represents a bucket root (no key specified)
+    pub fn is_bucket_root(&self) -> bool {
+        self.key.is_empty()
+    }
+
+    /// Convert to a string representation
+    pub fn to_uri(&self) -> String {
+        if self.key.is_empty() {
+            format!("ipc://{}", self.bucket_address)
+        } else {
+            format!("ipc://{}/{}", self.bucket_address, self.key)
+        }
+    }
+}
+
+/// Parse an address from a string (supports f/t addresses and 0x addresses)
+pub fn parse_address(s: &str) -> Result<Address> {
+    // Try parsing as Filecoin address (f/t prefix)
+    let addr = Network::Mainnet
+        .parse_address(s)
+        .or_else(|e| match e {
+            NetworkError::UnknownNetwork => Network::Testnet.parse_address(s),
+            _ => Err(e),
+        })
+        .or_else(|_| {
+            // Try parsing as Ethereum address (0x prefix)
+            let eth_addr = ethers::types::Address::from_str(s)?;
+            ipc_api::ethers_address_to_fil_address(&eth_addr)
+        })?;
+
+    Ok(addr)
+}
+
+/// Check if a path string is a storage path (starts with ipc://)
+pub fn is_storage_path(path: &str) -> bool {
+    path.starts_with("ipc://")
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parse_storage_path() {
+        let path = StoragePath::parse(
+            "ipc://0x1234567890123456789012345678901234567890/documents/file.txt",
+        )
+        .expect("should parse");
+
+        assert_eq!(path.key, "documents/file.txt");
+    }
+
+    #[test]
+    fn test_parse_bucket_root() {
+        let path = StoragePath::parse("ipc://0x1234567890123456789012345678901234567890")
+            .expect("should parse");
+
+        assert!(path.is_bucket_root());
+        assert_eq!(path.key, "");
+    }
+
+    #[test]
+    fn test_parse_bucket_with_trailing_slash() {
+        let path = StoragePath::parse("ipc://0x1234567890123456789012345678901234567890/")
+            .expect("should parse");
+
+        assert_eq!(path.key, "");
+    }
+
+    #[test]
+    fn test_invalid_uri() {
+        assert!(StoragePath::parse("http://bucket/file").is_err());
+        assert!(StoragePath::parse("bucket/file").is_err());
+        assert!(StoragePath::parse("ipc://").is_err());
+    }
+
+    #[test]
+    fn test_to_uri() {
+        let uri = "ipc://0x1234567890123456789012345678901234567890/path/to/file.txt";
+        let path = StoragePath::parse(uri).expect("should parse");
+        assert_eq!(path.to_uri(), uri);
+    }
+}
diff --git a/ipc/cli/src/commands/subnet/init/config.rs b/ipc/cli/src/commands/subnet/init/config.rs
index ee6c5d8890..9295678073 100644
--- a/ipc/cli/src/commands/subnet/init/config.rs
+++ b/ipc/cli/src/commands/subnet/init/config.rs
@@ -99,6 +99,19 @@ pub enum ActivateConfig {
     },
 }
 
+/// Preset for generating
topdown configuration in node config templates. +#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)] +#[serde(rename_all = "kebab-case")] +pub enum NodeTopdownMode { + /// Auto-detect from subnet-create settings. If `parent-filecoin-rpc` is set, use F3. + #[default] + Auto, + /// Generate legacy vote-based topdown configuration. + Legacy, + /// Generate F3 proof-based topdown configuration. + F3, +} + /// Top-level YAML schema for `subnet init` #[derive(Debug, Deserialize)] #[serde(rename_all = "kebab-case")] @@ -120,6 +133,10 @@ pub struct SubnetInitConfig { /// Configuration for subnet genesis #[serde(default)] pub genesis: Option, + + /// Preset for auto-generated node topdown config (auto|legacy|f3). + #[serde(default)] + pub node_topdown_mode: NodeTopdownMode, } impl SubnetInitConfig { diff --git a/ipc/cli/src/commands/subnet/init/handlers.rs b/ipc/cli/src/commands/subnet/init/handlers.rs index cbd57307f8..3150160634 100644 --- a/ipc/cli/src/commands/subnet/init/handlers.rs +++ b/ipc/cli/src/commands/subnet/init/handlers.rs @@ -11,12 +11,14 @@ use crate::commands::wallet::import::import_wallet; use crate::ipc_config_store::IpcConfigStore; use crate::commands::subnet::init::config::{ - ActivateConfig, DeployConfig, SubnetCreateConfig, SubnetInitConfig, WalletImportArgs, + ActivateConfig, DeployConfig, NodeTopdownMode, SubnetCreateConfig, SubnetInitConfig, + WalletImportArgs, }; use crate::{get_ipc_provider, GlobalArguments}; use anyhow::{Context, Result}; use fendermint_vm_actor_interface::init::builtin_actor_eth_addr; use fendermint_vm_actor_interface::ipc::{self}; +use fvm_shared::address::Payload; use ipc_api::subnet_id::SubnetID; use ipc_provider::new_evm_keystore_from_config; use ipc_provider::IpcProvider; @@ -40,11 +42,29 @@ pub async fn handle_init(global: &GlobalArguments, init_cfg: &SubnetInitConfig) deploy_contracts(deploy_cfg, &ipc_config_store).await?; } + let resolved_topdown_mode = resolve_topdown_mode( + init_cfg.node_topdown_mode, + 
init_cfg.create.parent_filecoin_rpc.as_ref(), + ); + + if matches!(resolved_topdown_mode, NodeTopdownMode::F3) + && init_cfg.create.parent_filecoin_rpc.is_none() + { + anyhow::bail!( + "node-topdown-mode=f3 requires create.parent-filecoin-rpc to capture deterministic F3 genesis data" + ); + } + let mut provider = get_ipc_provider(global)?; // 3) Create and approve subnet - let created = - create_and_approve_subnet(&init_cfg.create, &ipc_config_store, &mut provider).await?; + let created = create_and_approve_subnet( + &init_cfg.create, + resolved_topdown_mode, + &ipc_config_store, + &mut provider, + ) + .await?; // 4) Optionally activate and generate genesis let created_genesis = if let Some(act_cfg) = &init_cfg.activate { @@ -71,11 +91,16 @@ pub async fn handle_init(global: &GlobalArguments, init_cfg: &SubnetInitConfig) // Use the global config directory let dir = global.config_dir(); + let ipc_snapshot = ipc_config_store.snapshot().await; + let node_config_path = generate_node_config( &created.subnet_id, &created.parent_subnet_id, &genesis.sealed, init_cfg.activate.as_ref(), + resolved_topdown_mode, + init_cfg.create.parent_filecoin_rpc.as_ref(), + &ipc_snapshot, &dir, ) .await?; @@ -92,7 +117,7 @@ pub async fn handle_init(global: &GlobalArguments, init_cfg: &SubnetInitConfig) let subnet_info_path = generate_subnet_info( &created.subnet_id, &created.parent_subnet_id, - &ipc_config_store.snapshot().await, + &ipc_snapshot, genesis, gen_cfg, init_cfg.activate.as_ref(), @@ -173,11 +198,19 @@ struct CreatedSubnet { /// Creates and approves the subnet on-chain. 
async fn create_and_approve_subnet( cfg: &SubnetCreateConfig, + topdown_mode: NodeTopdownMode, store: &IpcConfigStore, provider: &mut IpcProvider, ) -> Result { log::info!("Creating subnet"); - let actor_addr = create_subnet_cmd(provider.clone(), cfg).await?; + let mut create_cfg = cfg.clone(); + if matches!(topdown_mode, NodeTopdownMode::Legacy) { + // Legacy mode must not capture/store F3 instance ID in subnet actor. + create_cfg.parent_filecoin_rpc = None; + create_cfg.parent_filecoin_auth_token = None; + } + + let actor_addr = create_subnet_cmd(provider.clone(), &create_cfg).await?; let parent_id = SubnetID::from_str(&cfg.parent)?; let parent = store @@ -212,6 +245,22 @@ async fn create_and_approve_subnet( }) } +fn resolve_topdown_mode( + configured_mode: NodeTopdownMode, + parent_filecoin_rpc: Option<&url::Url>, +) -> NodeTopdownMode { + match configured_mode { + NodeTopdownMode::Auto => { + if parent_filecoin_rpc.is_some() { + NodeTopdownMode::F3 + } else { + NodeTopdownMode::Legacy + } + } + mode => mode, + } +} + /// Imports wallets into the IPC keystore fn import_wallets(all_imports: &Vec, provider: &IpcProvider) -> Result<()> { log::info!("Importing wallets"); @@ -229,9 +278,12 @@ pub async fn generate_node_config( parent_id: &SubnetID, genesis_path: &std::path::Path, activation_info: Option<&ActivateConfig>, + topdown_mode: NodeTopdownMode, + parent_filecoin_rpc: Option<&url::Url>, + ipc_config: &ipc_provider::config::Config, output_dir: &std::path::Path, ) -> anyhow::Result { - use crate::commands::node::config::{GenesisSource, NodeInitConfig}; + use crate::commands::node::config::{GenesisSource, NodeInitConfig, P2pPortsConfig}; use crate::commands::subnet::init::config::JoinConfig; use crate::commands::wallet::import::WalletImportArgs; @@ -288,9 +340,32 @@ pub async fn generate_node_config( }) }; - // Create basic node config with sensible defaults + let parent = ipc_config + .subnets + .get(parent_id) + .context("parent subnet not found in config")?; 
+ + let resolved_topdown_mode = resolve_topdown_mode(topdown_mode, parent_filecoin_rpc); + + let default_home = default_node_home(output_dir); + let parent_http_endpoint = parent_filecoin_rpc + .map(|u| u.to_string()) + .unwrap_or_else(|| parent.rpc_http().to_string()); + let parent_registry = address_to_eth_hex(&parent.registry_addr()) + .context("failed to convert parent registry address to ETH hex")?; + let parent_gateway = address_to_eth_hex(&parent.gateway_addr()) + .context("failed to convert parent gateway address to ETH hex")?; + let fendermint_overrides = build_default_fendermint_overrides( + subnet_id, + resolved_topdown_mode, + &parent_http_endpoint, + &parent_registry, + &parent_gateway, + )?; + + // Create basic node config with sensible defaults, ready to run. let node_config = NodeInitConfig { - home: "~/.node-ipc".into(), + home: default_home, subnet: subnet_id.to_string(), parent: parent_id.to_string(), genesis: genesis_source, @@ -303,11 +378,14 @@ pub async fn generate_node_config( p2p: Some(crate::commands::node::config::P2pConfig { external_ip: Some("127.0.0.1".to_string()), // Default external IP for user to modify listen_ip: Some("0.0.0.0".to_string()), // Default listen IP (binds to all interfaces) - ports: None, // Let user configure ports - peers: None, // Let user configure peers + ports: Some(P2pPortsConfig { + cometbft: Some(26656), + resolver: Some(26655), + }), + peers: None, // Let user configure peers }), cometbft_overrides: None, - fendermint_overrides: None, + fendermint_overrides: Some(fendermint_overrides), }; // Serialize NodeInitConfig to YAML @@ -327,6 +405,78 @@ pub async fn generate_node_config( Ok(node_config_path) } +fn address_to_eth_hex(addr: &fvm_shared::address::Address) -> anyhow::Result { + let eth = match addr.payload() { + Payload::Delegated(inner) => { + let subaddr = inner.subaddress(); + if subaddr.len() < 20 { + anyhow::bail!("delegated address subaddress too short for ETH conversion"); + } + let mut bytes = 
[0u8; 20]; + bytes.copy_from_slice(&subaddr[..20]); + EthAddress(bytes) + } + Payload::ID(id) => EthAddress::from_id(*id), + _ => anyhow::bail!("address is not convertible to ETH format"), + }; + + Ok(format!("0x{:?}", eth)) +} + +fn default_node_home(output_dir: &std::path::Path) -> std::path::PathBuf { + if let Some(home) = std::env::var_os("HOME") { + return std::path::PathBuf::from(home).join(".node-ipc"); + } + + output_dir + .parent() + .map(|p| p.join(".node-ipc")) + .unwrap_or_else(|| std::path::PathBuf::from(".node-ipc")) +} + +fn build_default_fendermint_overrides( + subnet_id: &SubnetID, + topdown_mode: NodeTopdownMode, + parent_http_endpoint: &str, + parent_registry: &str, + parent_gateway: &str, +) -> anyhow::Result { + let mut overrides = format!( + r#"[ipc] +subnet_id = "{subnet_id}" + +[ipc.topdown] +chain_head_delay = 2 +proposal_delay = 2 +max_proposal_range = 100 +polling_interval = 2 +exponential_back_off = 5 +exponential_retry_limit = 5 +parent_http_endpoint = "{parent_http_endpoint}" +parent_registry = "{parent_registry}" +parent_gateway = "{parent_gateway}" +"# + ); + + if matches!(topdown_mode, NodeTopdownMode::F3) { + overrides.push_str(&format!( + r#" +[ipc.topdown.f3.proof_service] +enabled = true +polling_interval = "30s" +parent_rpc_url = "{parent_http_endpoint}" +gateway_id = "{parent_gateway}" + +[ipc.topdown.f3.proof_service.cache_config] +lookahead_instances = 5 +retention_epochs = 100 +"# + )); + } + + toml::from_str(&overrides).context("failed to create default Fendermint topdown overrides") +} + /// Generate subnet information JSON file pub async fn generate_subnet_info( subnet_id: &SubnetID, diff --git a/ipc/cli/src/main.rs b/ipc/cli/src/main.rs index 6fda1b955b..0b4b82d8e9 100644 --- a/ipc/cli/src/main.rs +++ b/ipc/cli/src/main.rs @@ -12,19 +12,17 @@ async fn main() { } fn print_user_friendly_error(error: &anyhow::Error) { - // Extract the most meaningful error message - let error_msg = extract_meaningful_error(error); + // 
Print the full error chain so nothing is hidden from the user. + eprintln!("\nāŒ Error: {:#}", error); - // Print a clean, user-friendly error message - eprintln!("\nāŒ Error: {}", error_msg); - - // Provide helpful suggestions based on the error type - if let Some(suggestion) = get_error_suggestion(&error_msg) { + // Provide helpful suggestions based on the top-level message. + let top = error.to_string(); + if let Some(suggestion) = get_error_suggestion(&top) { eprintln!("\nšŸ’” Suggestion: {}", suggestion); } - // Check if this might be a contract-related error and suggest the documentation - if is_contract_related_error(&error_msg) { + // Suggest documentation for contract-related errors. + if is_contract_related_error(&top) { eprintln!("\nšŸ“– For detailed information about contract errors, see:"); eprintln!( " https://github.com/consensus-shipyard/ipc/blob/main/docs/ipc/contract-errors.md" @@ -32,44 +30,7 @@ fn print_user_friendly_error(error: &anyhow::Error) { eprintln!(" or run: ipc-cli --help"); } - // For debugging, show the full error chain if RUST_BACKTRACE is set - if std::env::var("RUST_BACKTRACE").is_ok() { - eprintln!("\nšŸ” Full error details:"); - eprintln!("{}", error); - } - - eprintln!(); // Add spacing for better readability -} - -fn extract_meaningful_error(error: &anyhow::Error) -> String { - // Get the root cause of the error chain - let mut root_cause = error.to_string(); - - // Get the first source error if available - if let Some(source) = error.source() { - root_cause = source.to_string(); - } - - // Clean up common error patterns - let cleaned = root_cause - .replace("error processing command Some(", "") - .replace("main process failed: ", "") - .trim() - .to_string(); - - // Special handling for contract revert errors - if cleaned.contains("Contract call reverted with data:") { - // Provide a generic but helpful message - return "Contract operation failed. The transaction was reverted by the smart contract." 
- .to_string(); - } - - // If the cleaned message is significantly shorter, use it - if cleaned.len() < root_cause.len() * 2 / 3 { - cleaned - } else { - root_cause - } + eprintln!(); } fn is_contract_related_error(error_msg: &str) -> bool { diff --git a/ipc/provider/src/lib.rs b/ipc/provider/src/lib.rs index 3f2d62f568..d09e1ae656 100644 --- a/ipc/provider/src/lib.rs +++ b/ipc/provider/src/lib.rs @@ -649,6 +649,21 @@ impl IpcProvider { conn.manager().get_validator_changeset(subnet, epoch).await } + /// Get the changes in subnet validators in an inclusive range. This is fetched from parent. + pub async fn get_validator_changeset_range( + &self, + subnet: &SubnetID, + from: ChainEpoch, + to: ChainEpoch, + ) -> anyhow::Result> { + let parent = subnet.parent().ok_or_else(|| anyhow!("no parent found"))?; + let conn = self.get_connection(&parent)?; + + conn.manager() + .get_validator_changeset_range(subnet, from, to) + .await + } + /// Get genesis info for a child subnet. This can be used to deterministically /// generate the genesis of the subnet pub async fn get_genesis_info(&self, subnet: &SubnetID) -> anyhow::Result { @@ -668,6 +683,19 @@ impl IpcProvider { conn.manager().get_top_down_msgs(subnet, epoch).await } + /// Get the top down messages in an inclusive range. 
+ pub async fn get_top_down_msgs_range( + &self, + subnet: &SubnetID, + from: ChainEpoch, + to: ChainEpoch, + ) -> anyhow::Result> { + let parent = subnet.parent().ok_or_else(|| anyhow!("no parent found"))?; + let conn = self.get_connection(&parent)?; + + conn.manager().get_top_down_msgs_range(subnet, from, to).await + } + pub async fn get_block_hash( &self, subnet: &SubnetID, diff --git a/ipc/provider/src/manager/evm/manager.rs b/ipc/provider/src/manager/evm/manager.rs index e9f03255b1..e00700b92d 100644 --- a/ipc/provider/src/manager/evm/manager.rs +++ b/ipc/provider/src/manager/evm/manager.rs @@ -84,6 +84,9 @@ const TRANSACTION_RECEIPT_RETRIES: usize = 200; /// The majority vote percentage for checkpoint submission when creating a subnet. const SUBNET_MAJORITY_PERCENTAGE: u8 = 67; +/// Maximum inclusive block span per `eth_getLogs` query for topdown ranges. +/// Chunking large backfills avoids provider-side range caps/timeouts. +const MAX_LOG_QUERY_RANGE: ChainEpoch = 500; pub struct EthSubnetManager { keystore: Option>>>, @@ -142,37 +145,19 @@ impl TopDownFinalityQuery for EthSubnetManager { subnet_id: &SubnetID, epoch: ChainEpoch, ) -> Result>> { - let gateway_contract = gateway_manager_facet::GatewayManagerFacet::new( - self.ipc_contract_info.gateway_addr, - Arc::new(self.ipc_contract_info.provider.clone()), - ); - - let topic1 = contract_address_from_subnet(subnet_id)?; - tracing::debug!( - "getting top down messages for subnet: {:?} with topic 1: {}", - subnet_id, - topic1, - ); - - let ev = gateway_contract - .event::() - .from_block(epoch as u64) - .to_block(epoch as u64) - .topic1(topic1) - .address(ValueOrArray::Value(gateway_contract.address())); + let events = self.query_top_down_msgs_range(subnet_id, epoch, epoch).await?; - let mut messages = vec![]; + let mut messages = Vec::with_capacity(events.len()); let mut hash = None; - for (event, meta) in query_with_meta(ev, gateway_contract.client()).await? 
{ + for (event, event_hash) in events { if let Some(h) = hash { - if h != meta.block_hash { + if h != event_hash { return Err(anyhow!("block hash not equal")); } } else { - hash = Some(meta.block_hash); + hash = Some(event_hash); } - - messages.push(IpcEnvelope::try_from(event.message)?); + messages.push(event); } let block_hash = if let Some(h) = hash { @@ -186,6 +171,20 @@ impl TopDownFinalityQuery for EthSubnetManager { }) } + async fn get_top_down_msgs_range( + &self, + subnet_id: &SubnetID, + from: ChainEpoch, + to: ChainEpoch, + ) -> Result> { + Ok(self + .query_top_down_msgs_range(subnet_id, from, to) + .await? + .into_iter() + .map(|(msg, _)| msg) + .collect()) + } + async fn get_block_hash(&self, height: ChainEpoch) -> Result { let block = self .ipc_contract_info @@ -209,31 +208,21 @@ impl TopDownFinalityQuery for EthSubnetManager { subnet_id: &SubnetID, epoch: ChainEpoch, ) -> Result>> { - let address = contract_address_from_subnet(subnet_id)?; - tracing::info!("querying validator changes in evm subnet contract: {address:}"); - - let contract = subnet_actor_manager_facet::SubnetActorManagerFacet::new( - address, - Arc::new(self.ipc_contract_info.provider.clone()), - ); - - let ev = contract - .event::() - .from_block(epoch as u64) - .to_block(epoch as u64) - .address(ValueOrArray::Value(contract.address())); + let events = self + .query_validator_changeset_range(subnet_id, epoch, epoch) + .await?; - let mut changes = vec![]; + let mut changes = Vec::with_capacity(events.len()); let mut hash = None; - for (event, meta) in query_with_meta(ev, contract.client()).await? 
{ + for (event, event_hash) in events { if let Some(h) = hash { - if h != meta.block_hash { + if h != event_hash { return Err(anyhow!("block hash not equal")); } } else { - hash = Some(meta.block_hash); + hash = Some(event_hash); } - changes.push(PowerChangeRequest::try_from(event)?); + changes.push(event); } let block_hash = if let Some(h) = hash { @@ -247,6 +236,20 @@ impl TopDownFinalityQuery for EthSubnetManager { }) } + async fn get_validator_changeset_range( + &self, + subnet_id: &SubnetID, + from: ChainEpoch, + to: ChainEpoch, + ) -> Result> { + Ok(self + .query_validator_changeset_range(subnet_id, from, to) + .await? + .into_iter() + .map(|(change, _)| change) + .collect()) + } + async fn latest_parent_finality(&self) -> Result { tracing::info!("querying latest parent finality "); @@ -1102,6 +1105,109 @@ impl EthManager for EthSubnetManager { } impl EthSubnetManager { + async fn query_top_down_msgs_range( + &self, + subnet_id: &SubnetID, + from: ChainEpoch, + to: ChainEpoch, + ) -> Result> { + if from > to { + return Ok(Vec::new()); + } + + let gateway_contract = gateway_manager_facet::GatewayManagerFacet::new( + self.ipc_contract_info.gateway_addr, + Arc::new(self.ipc_contract_info.provider.clone()), + ); + + let topic1 = contract_address_from_subnet(subnet_id)?; + tracing::debug!( + "getting top down messages for subnet: {:?} in range: [{}..={}] with topic 1: {}", + subnet_id, + from, + to, + topic1, + ); + + let mut messages = Vec::new(); + let mut chunk_start = from; + while chunk_start <= to { + let chunk_end = chunk_start + .saturating_add(MAX_LOG_QUERY_RANGE.saturating_sub(1)) + .min(to); + tracing::debug!( + subnet = %subnet_id, + chunk_start, + chunk_end, + "querying top down messages chunk" + ); + + let ev = gateway_contract + .event::() + .from_block(chunk_start as u64) + .to_block(chunk_end as u64) + .topic1(topic1) + .address(ValueOrArray::Value(gateway_contract.address())); + + for (event, meta) in query_with_meta(ev, 
gateway_contract.client()).await? { + messages.push((IpcEnvelope::try_from(event.message)?, meta.block_hash)); + } + + chunk_start = chunk_end.saturating_add(1); + } + Ok(messages) + } + + async fn query_validator_changeset_range( + &self, + subnet_id: &SubnetID, + from: ChainEpoch, + to: ChainEpoch, + ) -> Result> { + if from > to { + return Ok(Vec::new()); + } + + let address = contract_address_from_subnet(subnet_id)?; + tracing::info!( + "querying validator changes in evm subnet contract: {address:} in range: [{}..={}]", + from, + to + ); + + let contract = subnet_actor_manager_facet::SubnetActorManagerFacet::new( + address, + Arc::new(self.ipc_contract_info.provider.clone()), + ); + + let mut changes = Vec::new(); + let mut chunk_start = from; + while chunk_start <= to { + let chunk_end = chunk_start + .saturating_add(MAX_LOG_QUERY_RANGE.saturating_sub(1)) + .min(to); + tracing::debug!( + subnet = %subnet_id, + chunk_start, + chunk_end, + "querying validator changes chunk" + ); + + let ev = contract + .event::() + .from_block(chunk_start as u64) + .to_block(chunk_end as u64) + .address(ValueOrArray::Value(contract.address())); + + for (event, meta) in query_with_meta(ev, contract.client()).await? { + changes.push((PowerChangeRequest::try_from(event)?, meta.block_hash)); + } + + chunk_start = chunk_end.saturating_add(1); + } + Ok(changes) + } + pub fn new( gateway_addr: ethers::types::Address, registry_addr: ethers::types::Address, diff --git a/ipc/provider/src/manager/subnet.rs b/ipc/provider/src/manager/subnet.rs index d9ca8478b9..04d13fc5ff 100644 --- a/ipc/provider/src/manager/subnet.rs +++ b/ipc/provider/src/manager/subnet.rs @@ -254,6 +254,13 @@ pub trait TopDownFinalityQuery: Send + Sync { subnet_id: &SubnetID, epoch: ChainEpoch, ) -> Result>>; + /// Returns top down messages in an inclusive range. 
+ async fn get_top_down_msgs_range( + &self, + subnet_id: &SubnetID, + from: ChainEpoch, + to: ChainEpoch, + ) -> Result>; /// Get the block hash async fn get_block_hash(&self, height: ChainEpoch) -> Result; /// Get the validator change set from start to end block. @@ -262,6 +269,13 @@ pub trait TopDownFinalityQuery: Send + Sync { subnet_id: &SubnetID, epoch: ChainEpoch, ) -> Result>>; + /// Returns validator changes in an inclusive range. + async fn get_validator_changeset_range( + &self, + subnet_id: &SubnetID, + from: ChainEpoch, + to: ChainEpoch, + ) -> Result>; /// Returns the latest parent finality committed in a child subnet async fn latest_parent_finality(&self) -> Result; } diff --git a/scripts/ipc-subnet-manager/ipc-subnet-config.yml b/scripts/ipc-subnet-manager/ipc-subnet-config.yml index 7f95f57cb2..74b9c9f329 100644 --- a/scripts/ipc-subnet-manager/ipc-subnet-config.yml +++ b/scripts/ipc-subnet-manager/ipc-subnet-config.yml @@ -19,7 +19,7 @@ subnet: # ip: external IP (for SSH, RPC). internal_ip: for CometBFT/libp2p peer connections (same VPC) validators: - name: "validator-1" - ip: "34.16.93.183" + ip: "130.211.225.54" internal_ip: "10.128.0.2" ssh_user: "philip" ipc_user: "ipc" diff --git a/test.sh b/test.sh new file mode 100755 index 0000000000..e659cf68e1 --- /dev/null +++ b/test.sh @@ -0,0 +1,135 @@ +#!/usr/bin/env bash +set -euo pipefail + +CLI=./target/release/ipc-cli +GW=http://136.115.12.207:8080 + +pass() { echo " āœ… PASS"; } +fail() { echo " āŒ FAIL: $1"; exit 1; } + +# ── Setup: ensure credit + bucket exist ────────────────────────────────────── +echo "=== 0. Setup ===" + +echo " Buying 0.1 FIL of storage credit..." +$CLI storage client credit buy 0.1 || fail "credit buy" + +echo " Checking for existing buckets..." +BUCKET=$($CLI storage client bucket list 2>/dev/null \ + | grep -oE 't0[0-9]+' | head -1 || true) + +if [ -z "$BUCKET" ]; then + echo " No bucket found — creating one..." 
+ $CLI storage client bucket create || fail "bucket create" + sleep 5 + BUCKET=$($CLI storage client bucket list 2>/dev/null \ + | grep -oE 't0[0-9]+' | head -1 || true) + [ -n "$BUCKET" ] || fail "could not find bucket after creation" +fi +echo " Using bucket: $BUCKET" +pass + +# ── Cleanup old test data ──────────────────────────────────────────────────── +echo "=== 1. Cleanup ===" +$CLI storage client rm -r --force ipc://$BUCKET/t/ 2>&1 || true +echo " (pending blobs may prevent full cleanup — that's OK)" + +rm -rf /tmp/ipc-test /tmp/ipc-dl +mkdir -p /tmp/ipc-test/subdir +echo "hello ipc storage" > /tmp/ipc-test/file1.txt +echo "second file" > /tmp/ipc-test/file2.txt +echo "nested file" > /tmp/ipc-test/subdir/nested.txt +pass + +# ── Phase 1: Upload & Read ─────────────────────────────────────────────────── +echo "=== 2. credit info ===" +$CLI storage client credit info || fail "credit info" +pass + +echo "=== 3. bucket list ===" +$CLI storage client bucket list || fail "bucket list" +pass + +echo "=== 4. upload file1.txt ===" +$CLI storage client cp --overwrite /tmp/ipc-test/file1.txt ipc://$BUCKET/t/file1.txt --gateway $GW \ + || fail "upload file1" +pass + +echo "=== 5. upload file2.txt ===" +$CLI storage client cp --overwrite /tmp/ipc-test/file2.txt ipc://$BUCKET/t/file2.txt --gateway $GW \ + || fail "upload file2" +pass + +echo "=== 6. upload directory recursively ===" +$CLI storage client cp -r --overwrite /tmp/ipc-test/subdir ipc://$BUCKET/t/subdir --gateway $GW \ + || fail "upload directory" +pass + +echo "=== 7. list all ===" +$CLI storage client ls ipc://$BUCKET/ || fail "ls all" +pass + +echo "=== 8. list prefix t/ ===" +$CLI storage client ls ipc://$BUCKET/t/ || fail "ls prefix" +pass + +echo "=== 9. stat ===" +$CLI storage client stat ipc://$BUCKET/t/file1.txt || fail "stat" +pass + +echo "=== 10. 
cat ===" +OUTPUT=$($CLI storage client cat ipc://$BUCKET/t/file1.txt --gateway $GW) +echo "$OUTPUT" +[ "$OUTPUT" = "hello ipc storage" ] || fail "cat content mismatch" +pass + +echo "=== 11. download single file ===" +mkdir -p /tmp/ipc-dl +$CLI storage client cp ipc://$BUCKET/t/file2.txt /tmp/ipc-dl/file2.txt --gateway $GW \ + || fail "download single" +CONTENT=$(cat /tmp/ipc-dl/file2.txt) +echo "$CONTENT" +[ "$CONTENT" = "second file" ] || fail "download content mismatch" +pass + +echo "=== 12. download directory recursively ===" +$CLI storage client cp -r ipc://$BUCKET/t/subdir /tmp/ipc-dl/subdir --gateway $GW \ + || fail "download directory" +[ -f /tmp/ipc-dl/subdir/nested.txt ] || fail "nested.txt not downloaded" +pass + +# ── Phase 2: Mutations (need blob finalization) ────────────────────────────── +echo "" +echo "── Waiting 90s for blob finalization before testing mv/rm... ──" +sleep 90 + +echo "=== 13. move file2 -> file2-renamed ===" +$CLI storage client mv ipc://$BUCKET/t/file2.txt ipc://$BUCKET/t/file2-renamed.txt \ + --gateway $GW || fail "mv" +pass + +echo "=== 14. verify rename ===" +$CLI storage client ls ipc://$BUCKET/t/ +pass + +echo "=== 15. rm file2-renamed ===" +$CLI storage client rm --force ipc://$BUCKET/t/file2-renamed.txt || fail "rm single" +pass + +echo "=== 16. rm file1 ===" +$CLI storage client rm --force ipc://$BUCKET/t/file1.txt || fail "rm file1" +pass + +echo "=== 17. rm -r t/subdir ===" +$CLI storage client rm -r --force ipc://$BUCKET/t/subdir || fail "rm recursive" +pass + +echo "=== 18. final list ===" +$CLI storage client ls ipc://$BUCKET/ +pass + +echo "" +echo "════════════════════════════════════════════════" +echo " All 18 steps passed!" +echo "════════════════════════════════════════════════" + +rm -rf /tmp/ipc-test /tmp/ipc-dl