Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 8 additions & 1 deletion contracts/contracts/gateway/router/CheckpointingFacet.sol
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
pragma solidity ^0.8.23;

import {GatewayActorModifiers} from "../../lib/LibGatewayActorStorage.sol";
import {BottomUpCheckpoint} from "../../structs/CrossNet.sol";
import {BottomUpCheckpoint, ActivitySummary, ActivitySummaryCommitted} from "../../structs/CrossNet.sol";
import {LibGateway} from "../../lib/LibGateway.sol";
import {LibQuorum} from "../../lib/LibQuorum.sol";
import {Subnet} from "../../structs/Subnet.sol";
Expand Down Expand Up @@ -49,13 +49,17 @@ contract CheckpointingFacet is GatewayActorModifiers {
/// @param membershipWeight - the total weight of the membership
function createBottomUpCheckpoint(
BottomUpCheckpoint calldata checkpoint,
// TODO(rewarder) ActivitySummary calldata summary,
bytes32 membershipRootHash,
uint256 membershipWeight
) external systemActorOnly {
if (LibGateway.bottomUpCheckpointExists(checkpoint.blockHeight)) {
revert CheckpointAlreadyExists();
}

// TODO(rewarder): compute the commitment to the summary and set it in the checkpoint.
// Collect summaries to relay and put them in the checkpoint. Reset the pending summaries map.

LibQuorum.createQuorumInfo({
self: s.checkpointQuorumMap,
objHeight: checkpoint.blockHeight,
Expand All @@ -64,6 +68,9 @@ contract CheckpointingFacet is GatewayActorModifiers {
membershipWeight: membershipWeight,
majorityPercentage: s.majorityPercentage
});

// TODO(rewarder): emit an ActivitySummaryCommitted event so relayers can pick it up.

LibGateway.storeBottomUpCheckpoint(checkpoint);
}

Expand Down
18 changes: 18 additions & 0 deletions contracts/contracts/interfaces/IValidatorRewarder.sol
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
pragma solidity ^0.8.23;

import {SubnetID} from "../structs/Subnet.sol";
import {ActivitySummary} from "../structs/CrossNet.sol";

/// @title ValidatorRewarder interface.
///
/// @dev Implement this interface and supply the address of the implementation contract at subnet creation to process
/// subnet summaries at this level, and disburse rewards to validators based on their block production activity.
///
/// This interface will be called by the subnet actor when a relayer presents an activity summary for processing.
interface IValidatorRewarder {
    /// @notice Called by the subnet manager contract to instruct the rewarder to process the subnet summary and
    /// disburse any relevant rewards.
    /// @param id The ID of the subnet whose activity the summary reports on.
    /// @param summary The activity summary being presented for reward disbursement.
    /// @dev This method should revert if the summary is invalid; this will cause the relayer's submission
    /// transaction to revert, leaving the summary pending so it can be presented again.
    /// NOTE(review): the exact criteria that make a summary invalid are implementation-defined — document them
    /// in the implementing contract.
    function disburseRewards(SubnetID memory id, ActivitySummary memory summary) external;
}
19 changes: 19 additions & 0 deletions contracts/contracts/lib/LibSubnetActorStorage.sol
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,25 @@ import {EnumerableSet} from "@openzeppelin/contracts/utils/structs/EnumerableSet
address[] genesisBalanceKeys;
/// @notice The validator gater, if address(0), no validator gating is performed
address validatorGater;

/// BEGIN Validator Rewards.

/// @notice The validator rewarder.
/// If address(0), this subnet does not process activity summaries, but instead forwards them to the parent
/// network via bottom-up checkpoints.
/// If address(0), and this is the root network, summaries are discarded (> /dev/null).
/// TODO(rewarder): set this address correctly from the constructor.
address validatorRewarder;
/// @notice Summaries pending to be processed.
/// If the validator rewarder is non-zero, these denote summaries presentable at this level.
/// If the validator rewarder is zero, these summaries must be relayed upwards in the next bottom-up checkpoint.
/// Partitioned by subnet ID, in the sequence they must be presented.
/// TODO(rewarder): optimize this pair of data structures.
mapping(SubnetID => bytes32[]) pendingSummaries;
/// @notice Index over presentable summaries back to the subnet ID, so we can locate them quickly when they're presented.
/// Only used if the validator rewarder is non-zero.
/// TODO(rewarder): optimize this pair of data structures.
mapping(bytes32 => SubnetID) presentableSummaries;
}

library LibSubnetActorStorage {
Expand Down
25 changes: 25 additions & 0 deletions contracts/contracts/structs/CrossNet.sol
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,31 @@ struct BottomUpCheckpoint {
uint64 nextConfigurationNumber;
/// @dev Batch of messages to execute.
IpcEnvelope[] msgs;
/// @dev A commitment to the summary of our chain activity between the previous checkpoint and this one.
bytes32 summary;
/// @dev Summaries relayed upwards from descendants of this subnet.
/// NOTE: Not merkelized to keep it simple, but we will merkelize later to scale better.
RelayedSummary[] relayedSummaries;
}

struct ActivitySummary {
/// @dev The block range the activity summary spans; these are the local heights of the start and the end, inclusive.
uint256[2] blockRange;
/// @dev The validators whose activity we're reporting about.
address[] validators;
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just curious why 2 arrays instead of a mapping?

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I understand the arrays if when distributing the rewards we have to process them all but I agree with @cryptoAtwill, validators could claim their own rewards, so a mapping would make sense.

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is a DTO, so mapping cannot, but I have updated the data struct slightly for storage

/// @dev The number of blocks committed by each validator in the position they appear in the validators array.
/// If there is a configuration change applied at this checkpoint, this carries information about the _old_ validator set.
uint64[] blocksCommitted;
Copy link
Copy Markdown
Contributor

@JulissaDantes JulissaDantes Oct 4, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could we have a metadata bytes typed member here to allow to pass additional info? or are there other alternatives for customization that are still in the making?

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nice suggestion @JulissaDantes. @raulk, in particular, we have additional information we'd like to propagate up the hierarchy. I know we had talked about specific "zones" for this type of information in the checkpoints at one point. What is your thinking here at this stage?

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

At least for solidity, there is no generic, then perhaps only bytes array? To let the subnet/fendermint and parent IValidatorRewarder handle the bytes deserialization + distribution. What the framework does is just checking:

  • Validator has claimed before?
  • Does the claim actually exists?
  • The lifecycle of the validator reward

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, this is the direction I wanted to move things in. This was the idea:

  1. We add a region in checkpoints to carry extensible subnet activity reports in the form of { namespace/type/discriminator: bytes, payload: bytes }[] -- we can call this checkpoint extensions.
  2. IPC ships with a set of default subnet activity reporters as extensions, e.g. the block production one here.
  3. Users can plug in custom subnet activity reporters to, e.g. report on quality of service, retrieval rate, scoring, etc.
  4. Ideally the reporters are Wasm actors, so they can compose easily and be upgraded through governance in the future. However, to truly make this design client/node-agnostic, users would likely need to add custom syscalls to access the metadata actors will want to report on.
  5. We'd need a master actor to orchestrate "hooks". The extend_checkpoint hook would be called by the IPC node, injecting the raw Checkpoint and expecting an array of extension fragments in return. The client would assemble them into the Checkpoint and submit that on chain for local quorum signature.

@carsonfarmer @JulissaDantes In my head we could defer this feature, but it sounds like it would be beneficial to you guys to have it now, right?

If so, I think we can simplify the above on several fronts to try to meet the time constraints while delivering the feature. How about:

  1. We don't bother with developing the checkpoint extensions as Wasm-land actors for now; we just add well-bounded hook callbacks in Fendermint with the intention to migrate them to actors in the future.
  2. We have the node orchestrate the hooks instead of a master actor.

Open points:

  1. Namespacing.
  2. Parent execution.

}

event ActivitySummaryCommitted(bytes32 indexed commitment, ActivitySummary summary);

struct RelayedSummary {
    /// @dev The ID of the subnet whose activity is being relayed.
    SubnetID subnet;
    /// @dev The commitment to the summary, so it can be presented later by the relayer.
    /// Computed by abi.encode'ing the ActivitySummary and hashing the encoded bytes.
    /// NOTE(review): the original comment says "a blake2b hash ... via the Eth precompile", but the EVM only
    /// exposes the EIP-152 blake2 F compression function, not a full blake2b hash precompile — confirm the
    /// intended hashing scheme (keccak256 is the native option).
    bytes32 commitment;
}

/// @notice A batch of bottom-up messages for execution.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,10 @@ contract SubnetActorCheckpointingFacet is SubnetActorModifiers, ReentrancyGuard,

s.lastBottomUpCheckpointHeight = checkpoint.blockHeight;

// TODO(rewarder): if we have a non-zero validator rewarder at this level, queue the commitment for processing in storage (add to pending and presentable summaries).
// If we have a zero validator rewarder at this level, and we are the L1, discard the incoming commitments.
// If we have a zero validator rewarder at this level, and we are not the L1, relay the commitments upwards (add to pending summaries).

// Commit in gateway to distribute rewards
IGateway(s.ipcGatewayAddr).commitCheckpoint(checkpoint);

Expand Down
12 changes: 12 additions & 0 deletions contracts/contracts/subnet/SubnetActorRewardFacet.sol
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
pragma solidity ^0.8.23;

import {ActivitySummary} from "../structs/CrossNet.sol";
import {QuorumObjKind} from "../structs/Quorum.sol";
import {Pausable} from "../lib/LibPausable.sol";
import {ReentrancyGuard} from "../lib/LibReentrancyGuard.sol";
Expand All @@ -13,6 +14,17 @@ import {Asset} from "../structs/Subnet.sol";
contract SubnetActorRewardFacet is SubnetActorModifiers, ReentrancyGuard, Pausable {
using AssetHelper for Asset;

// TODO(rewards): add this function so that relayers can submit summaries to process reward payouts in the root network.
/// @notice Allows a relayer to present an activity summary for a subnet so that rewards can be disbursed.
/// @param subnetId The ID of the subnet the summary pertains to.
/// @param summary The full activity summary whose commitment was previously relayed in a bottom-up checkpoint.
/// @dev Fix: `subnetId` is a struct and therefore requires an explicit data location (`memory`) in the
/// parameter list; without it the contract does not compile under Solidity ^0.8.
function submitSummary(SubnetID memory subnetId, ActivitySummary memory summary) external nonReentrant whenNotPaused {
    // TODO(rewards):
    // 1. Check that the subnet is active.
    // 2. Check that the subnet has a non-zero ValidatorRewarder.
    // 3. Hash the activity summary to get the commitment.
    // 4. Validate that the commitment is pending and presentable, and validate that it matches the expected subnet.
    // 5. Send the summary to the ValidatorRewarder#disburseRewards.
    // 6. If OK (not reverted), drop the summary from the pending and presentable commitments.
}

/// @notice Validator claims their released collateral.
function claim() external nonReentrant whenNotPaused {
uint256 amount = LibStaking.claimCollateral(msg.sender);
Expand Down
7 changes: 7 additions & 0 deletions fendermint/vm/interpreter/src/fvm/checkpoint.rs
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,13 @@ where

let num_msgs = msgs.len();

// TODO(rewards): query block producers for the blocks from the last checkpointed epoch to the current one.
// Ideally keep a live cache of block producers, append to it when new blocks are committed, and prune it when generating a checkpoint.
// But for now, we can try to keep it simple and query CometBFT, although that adds latency.
// If we do this, this method seems to be the quickest way: https://docs.cometbft.com/main/rpc/#/Info/block_search

// TODO(rewards): populate the ActivitySummary struct with the information above, and pass it to the create_bottom_up_checkpoint call.

// Construct checkpoint.
let checkpoint = BottomUpCheckpoint {
subnet_id,
Expand Down
1 change: 1 addition & 0 deletions ipc/api/src/checkpoint.rs
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,7 @@ pub struct BottomUpCheckpoint {
pub next_configuration_number: u64,
/// The list of messages for execution
pub msgs: Vec<IpcEnvelope>,
// TODO(rewards): add new fields and data types for summaries and commitments.
}

pub fn serialize_vec_bytes_to_vec_hex<T: AsRef<[u8]>, S>(
Expand Down
7 changes: 7 additions & 0 deletions ipc/cli/src/commands/checkpoint/relayer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,13 @@ impl CommandLineHandler for BottomUpRelayer {
async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
log::debug!("start bottom up relayer with args: {:?}", arguments);

// TODO(rewards): enable the relayer to watch multiple subnets at once.

// TODO(rewards): add a new flag --process-summaries to activate processing summaries on all subnets.
// Enabling this mode makes the relayer watch for ActivitySummaryCommitted events, and stores the summaries in a database.
// It then tracks which summaries have been committed to the root (right now we only support submitting to the L1), to chase
// after those and present them via SubnetActor#submitSummary in order to trigger reward payout.

// Prometheus metrics
match &arguments.metrics_address {
Some(addr) => {
Expand Down