diff --git a/.changeset/shaky-phones-mix.md b/.changeset/shaky-phones-mix.md new file mode 100644 index 00000000000..410af473108 --- /dev/null +++ b/.changeset/shaky-phones-mix.md @@ -0,0 +1,5 @@ +--- +'openzeppelin-solidity': minor +--- + +`TrieProof`: Add library for verifying Ethereum Merkle-Patricia trie inclusion proofs. diff --git a/.changeset/tender-pans-yawn.md b/.changeset/tender-pans-yawn.md new file mode 100644 index 00000000000..d9a9e39563d --- /dev/null +++ b/.changeset/tender-pans-yawn.md @@ -0,0 +1,5 @@ +--- +'openzeppelin-solidity': minor +--- + +`Bytes`: Add the `toNibbles` function that expands the nibbles (4 bits chunk) of a `bytes` buffer. Used for manipulating Patricia Merkle Trees keys and paths. diff --git a/contracts/mocks/Stateless.sol b/contracts/mocks/Stateless.sol index 5b11eb657b8..30188a8f4be 100644 --- a/contracts/mocks/Stateless.sol +++ b/contracts/mocks/Stateless.sol @@ -57,5 +57,6 @@ import {SignedMath} from "../utils/math/SignedMath.sol"; import {StorageSlot} from "../utils/StorageSlot.sol"; import {Strings} from "../utils/Strings.sol"; import {Time} from "../utils/types/Time.sol"; +import {TrieProof} from "../utils/cryptography/TrieProof.sol"; contract Dummy1234 {} diff --git a/contracts/utils/Bytes.sol b/contracts/utils/Bytes.sol index 06e2afad46d..c3460740f10 100644 --- a/contracts/utils/Bytes.sol +++ b/contracts/utils/Bytes.sol @@ -202,6 +202,48 @@ library Bytes { return result; } + /** + * @dev Split each byte in `input` into two nibbles (4 bits each) + * + * Example: hex"01234567" → hex"0001020304050607" + */ + function toNibbles(bytes memory input) internal pure returns (bytes memory output) { + assembly ("memory-safe") { + let length := mload(input) + output := mload(0x40) + mstore(0x40, add(add(output, 0x20), mul(length, 2))) + mstore(output, mul(length, 2)) + for { + let i := 0 + } lt(i, length) { + i := add(i, 0x10) + } { + let chunk := shr(128, mload(add(add(input, 0x20), i))) + chunk := and( + 
0x0000000000000000ffffffffffffffff0000000000000000ffffffffffffffff, + or(shl(64, chunk), chunk) + ) + chunk := and( + 0x00000000ffffffff00000000ffffffff00000000ffffffff00000000ffffffff, + or(shl(32, chunk), chunk) + ) + chunk := and( + 0x0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff0000ffff, + or(shl(16, chunk), chunk) + ) + chunk := and( + 0x00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff00ff, + or(shl(8, chunk), chunk) + ) + chunk := and( + 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f, + or(shl(4, chunk), chunk) + ) + mstore(add(add(output, 0x20), mul(i, 2)), chunk) + } + } + } + /** * @dev Returns true if the two byte buffers are equal. */ diff --git a/contracts/utils/Memory.sol b/contracts/utils/Memory.sol index 378d247c041..bd21666b1d5 100644 --- a/contracts/utils/Memory.sol +++ b/contracts/utils/Memory.sol @@ -112,6 +112,17 @@ library Memory { } } + /// @dev Returns true if the two slices contain the same data. + function equal(Slice a, Slice b) internal pure returns (bool result) { + Memory.Pointer ptrA = _pointer(a); + Memory.Pointer ptrB = _pointer(b); + uint256 lenA = length(a); + uint256 lenB = length(b); + assembly ("memory-safe") { + result := eq(keccak256(ptrA, lenA), keccak256(ptrB, lenB)) + } + } + /** * @dev Private helper: create a slice from raw values (length and pointer) * diff --git a/contracts/utils/cryptography/README.adoc b/contracts/utils/cryptography/README.adoc index 13243625e04..66c47d12675 100644 --- a/contracts/utils/cryptography/README.adoc +++ b/contracts/utils/cryptography/README.adoc @@ -11,6 +11,7 @@ A collection of contracts and libraries that implement various signature validat * {SignatureChecker}: A library helper to support regular ECDSA from EOAs as well as ERC-1271 signatures for smart contracts. * {Hashes}: Commonly used hash functions. * {MerkleProof}: Functions for verifying https://en.wikipedia.org/wiki/Merkle_tree[Merkle Tree] proofs. 
+ * {TrieProof}: Library for verifying Ethereum Merkle-Patricia trie inclusion proofs. + * {EIP712}: Contract with functions to allow processing signed typed structure data according to https://eips.ethereum.org/EIPS/eip-712[EIP-712]. + * {ERC7739Utils}: Utilities library that implements a defensive rehashing mechanism to prevent replayability of smart contract signatures based on ERC-7739. + * {WebAuthn}: Library for verifying WebAuthn Authentication Assertions. @@ -38,6 +39,8 @@ A collection of contracts and libraries that implement various signature validat {{MerkleProof}} + +{{TrieProof}} + {{EIP712}} {{ERC7739Utils}} diff --git a/contracts/utils/cryptography/TrieProof.sol b/contracts/utils/cryptography/TrieProof.sol new file mode 100644 index 00000000000..4f7406eb9eb --- /dev/null +++ b/contracts/utils/cryptography/TrieProof.sol @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import {Math} from "../math/Math.sol"; +import {Bytes} from "../Bytes.sol"; +import {Memory} from "../Memory.sol"; +import {RLP} from "../RLP.sol"; + +/** + * @dev Library for verifying Ethereum Merkle-Patricia trie inclusion proofs. + * + * The {traverse} and {verify} functions can be used to prove the following value: + * + * * Transaction against the transactionsRoot of a block. + * * Event against receiptsRoot of a block. + * * Account details (RLP encoding of [nonce, balance, storageRoot, codeHash]) against the stateRoot of a block. + * * Storage slot (RLP encoding of the value) against the storageRoot of an account. + * + * Proving a storage slot is usually done in 3 steps: + * + * * From the stateRoot of a block, process the account proof (see `eth_getProof`) to get the account details. + * * RLP decode the account details to extract the storageRoot. + * * Use storageRoot of that account to process the storageProof (again, see `eth_getProof`).
+ * + * See https://ethereum.org/en/developers/docs/data-structures-and-encoding/patricia-merkle-trie[Merkle-Patricia trie] + * + * Based on https://github.com/ethereum-optimism/optimism/blob/ef970556e668b271a152124023a8d6bb5159bacf/packages/contracts-bedrock/src/libraries/trie/MerkleTrie.sol[this implementation from optimism]. + */ +library TrieProof { + using Bytes for *; + using RLP for *; + using Memory for *; + + enum Prefix { + EXTENSION_EVEN, // 0 - Extension node with even length path + EXTENSION_ODD, // 1 - Extension node with odd length path + LEAF_EVEN, // 2 - Leaf node with even length path + LEAF_ODD // 3 - Leaf node with odd length path + } + + enum ProofError { + NO_ERROR, // No error occurred during proof traversal + EMPTY_KEY, // The provided key is empty + INVALID_ROOT, // The validation of the root node failed + INVALID_LARGE_NODE, // The validation of a large node failed + INVALID_SHORT_NODE, // The validation of a short node failed + EMPTY_PATH, // The path in a leaf or extension node is empty + INVALID_PATH_REMAINDER, // The path remainder in a leaf or extension node is invalid + EMPTY_EXTENSION_PATH_REMAINDER, // The path remainder in an extension node is empty + INVALID_EXTRA_PROOF_ELEMENT, // A leaf value should be the last proof element + EMPTY_VALUE, // The leaf value is empty + MISMATCH_LEAF_PATH_KEY_REMAINDER, // The path remainder in a leaf node doesn't match the key remainder + UNKNOWN_NODE_PREFIX, // The node prefix is unknown + UNPARSEABLE_NODE, // The node cannot be parsed from RLP encoding + INVALID_PROOF // General failure during proof traversal + } + + error TrieProofTraversalError(ProofError err); + + /// @dev The radix of the Ethereum trie + uint256 internal constant EVM_TREE_RADIX = 16; + + /// @dev Number of items in a branch node (16 children + 1 value) + uint256 internal constant BRANCH_NODE_LENGTH = EVM_TREE_RADIX + 1; + + /// @dev Number of items in leaf or extension nodes (always 2) + uint256 internal 
constant LEAF_OR_EXTENSION_NODE_LENGTH = 2; + + /// @dev Verifies a `proof` against a given `key`, `value`, and `root` hash. + function verify( + bytes memory value, + bytes32 root, + bytes memory key, + bytes[] memory proof + ) internal pure returns (bool) { + (bytes memory processedValue, ProofError err) = tryTraverse(root, key, proof); + return processedValue.equal(value) && err == ProofError.NO_ERROR; + } + + /** + * @dev Traverses a proof with a given key and returns the value. + * + * Reverts with {TrieProofTraversalError} if proof is invalid. + */ + function traverse(bytes32 root, bytes memory key, bytes[] memory proof) internal pure returns (bytes memory) { + (bytes memory value, ProofError err) = tryTraverse(root, key, proof); + require(err == ProofError.NO_ERROR, TrieProofTraversalError(err)); + return value; + } + + /** + * @dev Traverses a proof with a given key and returns the value and an error flag + * instead of reverting if the proof is invalid. This function may still revert if + * malformed input leads to RLP decoding errors.
+ */ + function tryTraverse( + bytes32 root, + bytes memory key, + bytes[] memory proof + ) internal pure returns (bytes memory value, ProofError err) { + if (key.length == 0) return (_emptyBytesMemory(), ProofError.EMPTY_KEY); + + // Expand the key + bytes memory keyExpanded = key.toNibbles(); + + bytes32 currentNodeId; + uint256 currentNodeIdLength; + + // Free memory pointer cache + Memory.Pointer fmp = Memory.getFreeMemoryPointer(); + + // Traverse proof + uint256 keyIndex = 0; + uint256 proofLength = proof.length; + for (uint256 i = 0; i < proofLength; ++i) { + // validates the encoded node matches the expected node id + bytes memory encoded = proof[i]; + if (keyIndex == 0) { + // Root node must match root hash + if (keccak256(encoded) != root) return (_emptyBytesMemory(), ProofError.INVALID_ROOT); + } else if (encoded.length >= 32) { + // Large nodes are stored as hashes + if (currentNodeIdLength != 32 || keccak256(encoded) != currentNodeId) + return (_emptyBytesMemory(), ProofError.INVALID_LARGE_NODE); + } else { + // Small nodes must match directly + if (currentNodeIdLength != encoded.length || bytes32(encoded) != currentNodeId) + return (_emptyBytesMemory(), ProofError.INVALID_SHORT_NODE); + } + + // decode the current node as an RLP list, and process it + Memory.Slice[] memory decoded = encoded.decodeList(); + if (decoded.length == BRANCH_NODE_LENGTH) { + // If we've consumed the entire key, the value must be in the last slot + // Otherwise, continue down the branch specified by the next nibble in the key + if (keyIndex == keyExpanded.length) { + return _validateLastItem(decoded[EVM_TREE_RADIX], proofLength, i); + } else { + bytes1 branchKey = keyExpanded[keyIndex]; + (currentNodeId, currentNodeIdLength) = _getNodeId(decoded[uint8(branchKey)]); + keyIndex += 1; + } + } else if (decoded.length == LEAF_OR_EXTENSION_NODE_LENGTH) { + bytes memory path = decoded[0].readBytes().toNibbles(); // expanded path + // The following is equivalent to path.length < 2 
because toNibbles can't return odd-length buffers + if (path.length == 0) { + return (_emptyBytesMemory(), ProofError.EMPTY_PATH); + } + uint8 prefix = uint8(path[0]); + Memory.Slice keyRemainder = keyExpanded.asSlice().slice(keyIndex); // Remaining key to match + Memory.Slice pathRemainder = path.asSlice().slice(2 - (prefix % 2)); // Path after the prefix + uint256 pathRemainderLength = pathRemainder.length(); + + // pathRemainder must not be longer than keyRemainder, and it must be a prefix of it + if ( + pathRemainderLength > keyRemainder.length() || + !pathRemainder.equal(keyRemainder.slice(0, pathRemainderLength)) + ) { + return (_emptyBytesMemory(), ProofError.INVALID_PATH_REMAINDER); + } + + if (prefix <= uint8(Prefix.EXTENSION_ODD)) { + // Eq to: prefix == EXTENSION_EVEN || prefix == EXTENSION_ODD + if (pathRemainderLength == 0) { + return (_emptyBytesMemory(), ProofError.EMPTY_EXTENSION_PATH_REMAINDER); + } + // Increment keyIndex by the number of nibbles consumed and continue traversal + (currentNodeId, currentNodeIdLength) = _getNodeId(decoded[1]); + keyIndex += pathRemainderLength; + } else if (prefix <= uint8(Prefix.LEAF_ODD)) { + // Eq to: prefix == LEAF_EVEN || prefix == LEAF_ODD + // + // Leaf node (terminal) - return its value if key matches completely + // we already know that pathRemainder is a prefix of keyRemainder, so checking the length is sufficient + return + pathRemainderLength == keyRemainder.length() + ? _validateLastItem(decoded[1], proofLength, i) + : (_emptyBytesMemory(), ProofError.MISMATCH_LEAF_PATH_KEY_REMAINDER); + } else { + return (_emptyBytesMemory(), ProofError.UNKNOWN_NODE_PREFIX); + } + } else { + return (_emptyBytesMemory(), ProofError.UNPARSEABLE_NODE); + } + + // Reset memory before next iteration. Deallocates `decoded` and `path`.
+ Memory.setFreeMemoryPointer(fmp); + } + + // If we've gone through all proof elements without finding a value, the proof is invalid + return (_emptyBytesMemory(), ProofError.INVALID_PROOF); + } + + /** + * @dev Validates that we've reached a valid leaf value and this is the last proof element. + * Ensures the value is not empty and no extra proof elements exist. + */ + function _validateLastItem( + Memory.Slice item, + uint256 trieProofLength, + uint256 i + ) private pure returns (bytes memory, ProofError) { + if (i != trieProofLength - 1) { + return (_emptyBytesMemory(), ProofError.INVALID_EXTRA_PROOF_ELEMENT); + } + bytes memory value = item.readBytes(); + return (value, value.length == 0 ? ProofError.EMPTY_VALUE : ProofError.NO_ERROR); + } + + /** + * @dev Extracts the node ID (hash or raw data based on size). + * + * For small nodes (encoded length < 32 bytes) the node ID is the node content itself. + * For larger nodes, the node ID is the hash of the encoded node data. + * + * NOTE: Under normal operation, the input should never be exactly 32 bytes long. If such an input is provided, + * it will be used directly, similarly to how small nodes are processed. The following traversal checks whether + * the next node is a large one, and whether its hash matches the raw 32 bytes we have here. If that is the case, + * the value will be accepted. Otherwise, the next step will return an {INVALID_LARGE_NODE} error. + */ + function _getNodeId(Memory.Slice node) private pure returns (bytes32 nodeId, uint256 nodeIdLength) { + uint256 nodeLength = node.length(); + return nodeLength < 33 ?
(node.load(0), nodeLength) : (node.readBytes32(), 32); + } + + function _emptyBytesMemory() private pure returns (bytes memory result) { + assembly ("memory-safe") { + result := 0x60 // mload(0x60) is always 0 + } + } +} diff --git a/package-lock.json b/package-lock.json index 801e7ecfbfa..73099e6e7a1 100644 --- a/package-lock.json +++ b/package-lock.json @@ -14,6 +14,7 @@ "@changesets/pre": "^2.0.0", "@changesets/read": "^0.6.0", "@eslint/compat": "^1.2.1", + "@ethereumjs/mpt": "^10.1.0", "@nomicfoundation/hardhat-chai-matchers": "^2.0.6", "@nomicfoundation/hardhat-ethers": "^3.0.9", "@nomicfoundation/hardhat-network-helpers": "^1.0.13", @@ -714,6 +715,60 @@ "crc-32": "^1.2.0" } }, + "node_modules/@ethereumjs/mpt": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/@ethereumjs/mpt/-/mpt-10.1.0.tgz", + "integrity": "sha512-gWxx8n1OB2Js3EFWNeSdsUdYvw1P5WvdKxHmeq+giO03mt5fGRbuSh3ruXzreG4JRugpNSW6Dwqk+StVwjrQ4Q==", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "@ethereumjs/rlp": "^10.1.0", + "@ethereumjs/util": "^10.1.0", + "debug": "^4.4.0", + "ethereum-cryptography": "^3.2.0", + "lru-cache": "11.0.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@ethereumjs/mpt/node_modules/@ethereumjs/rlp": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/@ethereumjs/rlp/-/rlp-10.1.0.tgz", + "integrity": "sha512-r67BJbwilammAqYI4B5okA66cNdTlFzeWxPNJOolKV52ZS/flo0tUBf4x4gxWXBgh48OgsdFV1Qp5pRoSe8IhQ==", + "dev": true, + "license": "MPL-2.0", + "bin": { + "rlp": "bin/rlp.cjs" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@ethereumjs/mpt/node_modules/@ethereumjs/util": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/@ethereumjs/util/-/util-10.1.0.tgz", + "integrity": "sha512-GGTCkRu1kWXbz2JoUnIYtJBOoA9T5akzsYa91Bh+DZQ3Cj4qXj3hkNU0Rx6wZlbcmkmhQfrjZfVt52eJO/y2nA==", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "@ethereumjs/rlp": "^10.1.0", + "ethereum-cryptography": 
"^3.2.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@ethereumjs/mpt/node_modules/lru-cache": { + "version": "11.0.2", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.0.2.tgz", + "integrity": "sha512-123qHRfJBmo2jXDbo/a5YOQrJoHF/GNQTLzQ5+IdK5pWpceK17yRc6ozlWd25FxvGKQbIUs91fDFkXmDHTKcyA==", + "dev": true, + "license": "ISC", + "engines": { + "node": "20 || >=22" + } + }, "node_modules/@ethereumjs/rlp": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/@ethereumjs/rlp/-/rlp-4.0.1.tgz", @@ -1886,6 +1941,7 @@ "integrity": "sha512-xBJdRUiCwKpr0OYrOzPwAyNGtsVzoBx32HFPJVv6S+sFA9TmBIBDaqNlFPmBH58ZjgNnGhEr/4oBZvGr4q4TjQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "debug": "^4.1.1", "lodash.isequal": "^4.5.0" @@ -2555,6 +2611,7 @@ "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", "dev": true, "license": "MIT", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -2622,6 +2679,7 @@ "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", @@ -3225,6 +3283,7 @@ "integrity": "sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "assertion-error": "^1.1.0", "check-error": "^1.0.3", @@ -4265,6 +4324,7 @@ "integrity": "sha512-E6Mtz9oGQWDCpV12319d59n4tx9zOTXSTmc8BLVxBx+G/0RdM5MvEEJLU9c0+aleoePYYgVTOsRblx433qmhWQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.12.1", @@ -4682,6 +4742,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "@adraffy/ens-normalize": "1.10.1", "@noble/curves": "1.2.0", @@ -5464,6 +5525,7 @@ "integrity": 
"sha512-hwEUBvMJzl3Iuru5bfMOEDeF2d7cbMNNF46rkwdo8AeW2GDT4VxFLyYWTi6PTLrZiftHPDiKDlAdAiGvsR9FYA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@ethereumjs/util": "^9.1.0", "@ethersproject/abi": "^5.1.2", @@ -8391,6 +8453,7 @@ "integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==", "dev": true, "license": "MIT", + "peer": true, "bin": { "prettier": "bin/prettier.cjs" }, @@ -10327,6 +10390,7 @@ "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -10905,6 +10969,7 @@ "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10.0.0" }, diff --git a/package.json b/package.json index e5b0985b10d..6f2d411dd5b 100644 --- a/package.json +++ b/package.json @@ -57,6 +57,7 @@ "@changesets/pre": "^2.0.0", "@changesets/read": "^0.6.0", "@eslint/compat": "^1.2.1", + "@ethereumjs/mpt": "^10.1.0", "@nomicfoundation/hardhat-chai-matchers": "^2.0.6", "@nomicfoundation/hardhat-ethers": "^3.0.9", "@nomicfoundation/hardhat-network-helpers": "^1.0.13", diff --git a/test/helpers/txpool.js b/test/helpers/txpool.js index f01327b223a..ad84f45dd77 100644 --- a/test/helpers/txpool.js +++ b/test/helpers/txpool.js @@ -1,17 +1,16 @@ const { network } = require('hardhat'); const { expect } = require('chai'); -const { mine } = require('@nomicfoundation/hardhat-network-helpers'); const { unique } = require('./iterate'); -async function batchInBlock(txs) { +async function batchInBlock(txs, provider = network.provider) { try { // disable auto-mining - await network.provider.send('evm_setAutomine', [false]); + await provider.send('evm_setAutomine', [false]); // send all transactions const responses = await Promise.all(txs.map(fn => fn())); // mine one block - await 
mine(); + await provider.send('evm_mine'); // fetch receipts const receipts = await Promise.all(responses.map(response => response.wait())); // Sanity check, all tx should be in the same block @@ -20,7 +19,7 @@ async function batchInBlock(txs) { return receipts; } finally { // enable auto-mining - await network.provider.send('evm_setAutomine', [true]); + await provider.send('evm_setAutomine', [true]); } } diff --git a/test/utils/Bytes.t.sol b/test/utils/Bytes.t.sol index 86966bda0c8..553d1b61ac5 100644 --- a/test/utils/Bytes.t.sol +++ b/test/utils/Bytes.t.sol @@ -155,6 +155,21 @@ contract BytesTest is Test { } } + // Convert to nibbles + function testSymbolictoNibbles(bytes memory input) public pure { + bytes memory nibbles = Bytes.toNibbles(input); + assertEq(nibbles.length, input.length * 2); + + // reconstruct input by traversing nibbles + for (uint256 i = 0; i < input.length; ++i) { + bytes1 nible0 = nibbles[2 * i]; + bytes1 nible1 = nibbles[2 * i + 1]; + assertEq(uint8(nible0) >> 4, 0); + assertEq(uint8(nible1) >> 4, 0); + assertEq(input[i], bytes1((uint8(nible0) << 4) | uint8(nible1))); + } + } + function testReplace(bytes memory buffer, uint256 pos, bytes memory replacement) public pure { bytes memory originalBuffer = bytes.concat(buffer); bytes memory originalReplacement = bytes.concat(replacement); diff --git a/test/utils/Bytes.test.js b/test/utils/Bytes.test.js index 9eb439cf9c9..9366c5d269f 100644 --- a/test/utils/Bytes.test.js +++ b/test/utils/Bytes.test.js @@ -209,6 +209,18 @@ describe('Bytes', function () { }); }); + describe('nibbles', function () { + it('full input', async function () { + await expect(this.mock.$toNibbles('0x0123456789abcdef')).to.eventually.equal( + '0x000102030405060708090a0b0c0d0e0f', + ); + }); + + it('empty input', async function () { + await expect(this.mock.$toNibbles('0x')).to.eventually.equal('0x'); + }); + }); + describe('equal', function () { it('identical buffers', async function () { await expect(this.mock.$equal(lorem, 
lorem)).to.eventually.be.true; diff --git a/test/utils/Memory.t.sol b/test/utils/Memory.t.sol index 86fb901b38c..cb8b8b390f1 100644 --- a/test/utils/Memory.t.sol +++ b/test/utils/Memory.t.sol @@ -35,4 +35,29 @@ contract MemoryTest is Test { length = bound(length, 0, input.length - offset); assertEq(input.asSlice().slice(offset, length).toBytes(), input.slice(offset, offset + length)); } + + function testSymbolicEqual(bytes memory a, bytes memory b) public pure { + Memory.Slice sliceA = a.asSlice(); + Memory.Slice sliceB = b.asSlice(); + bool expected = keccak256(a) == keccak256(b); + assertEq(Memory.equal(sliceA, sliceB), expected); + } + + function testEqual( + bytes memory a, + uint256 offsetA, + uint256 lengthA, + bytes memory b, + uint256 offsetB, + uint256 lengthB + ) public pure { + offsetA = bound(offsetA, 0, a.length); + offsetB = bound(offsetB, 0, b.length); + lengthA = bound(lengthA, 0, a.length - offsetA); + lengthB = bound(lengthB, 0, b.length - offsetB); + assertEq( + a.asSlice().slice(offsetA, lengthA).equal(b.asSlice().slice(offsetB, lengthB)), + keccak256(a.slice(offsetA, offsetA + lengthA)) == keccak256(b.slice(offsetB, offsetB + lengthB)) + ); + } } diff --git a/test/utils/cryptography/TrieProof.test.js b/test/utils/cryptography/TrieProof.test.js new file mode 100644 index 00000000000..78e961def20 --- /dev/null +++ b/test/utils/cryptography/TrieProof.test.js @@ -0,0 +1,676 @@ +const { ethers } = require('hardhat'); +const { expect } = require('chai'); +const { spawn } = require('child_process'); +const { MerklePatriciaTrie, createMerkleProof } = require('@ethereumjs/mpt'); + +const { Enum } = require('../../helpers/enums'); +const { zip } = require('../../helpers/iterate'); +const { generators } = require('../../helpers/random'); +const { batchInBlock } = require('../../helpers/txpool'); + +const ProofError = Enum( + 'NO_ERROR', // No error occurred during proof traversal + 'EMPTY_KEY', // The provided key is empty + 'INVALID_ROOT', // The 
validation of the root node failed + 'INVALID_LARGE_NODE', // The validation of a large node failed + 'INVALID_SHORT_NODE', // The validation of a short node failed + 'EMPTY_PATH', // The path in a leaf or extension node is empty + 'INVALID_PATH_REMAINDER', // The path remainder in a leaf or extension node is invalid + 'EMPTY_EXTENSION_PATH_REMAINDER', // The path remainder in an extension node is empty + 'INVALID_EXTRA_PROOF_ELEMENT', // A leaf value should be the last proof element + 'EMPTY_VALUE', // The leaf value is empty + 'MISMATCH_LEAF_PATH_KEY_REMAINDER', // The path remainder in a leaf node doesn't match the key remainder + 'UNKNOWN_NODE_PREFIX', // The node prefix is unknown + 'UNPARSEABLE_NODE', // The node cannot be parsed from RLP encoding + 'INVALID_PROOF', // General failure during proof traversal +); + +const ZeroBytes = generators.bytes.zero; + +const sanitizeHexString = value => (value.length % 2 ? '0x0' : '0x') + value.replace(/0x/, ''); +const encodeStorageLeaf = value => ethers.encodeRlp(ethers.stripZerosLeft(value)); + +describe('TrieProof', function () { + before('start anvil node', async function () { + const port = 8546; + + // start process and create provider + this.process = await spawn('anvil', ['--port', port], { timeout: 30000 }); + await new Promise(resolve => this.process.stdout.once('data', resolve)); + this.provider = new ethers.JsonRpcProvider(`http://localhost:${port}`); + + // deploy mock on the hardhat network + this.mock = await ethers.deployContract('$TrieProof'); + }); + + beforeEach('use fresh storage contract with empty state for each test', async function () { + this.storage = await this.provider.getSigner(0).then(signer => ethers.deployContract('StorageSlotMock', signer)); + this.target = await this.provider.getSigner(0).then(signer => ethers.deployContract('CallReceiverMock', signer)); + + this.getProof = ({ + provider = this.provider, + address = this.storage.target, + storageKeys = [], + blockNumber = 'latest', + }) 
=> + provider.send('eth_getProof', [ + address, + ethers.isHexString(storageKeys) ? [storageKeys] : storageKeys, + blockNumber, + ]); + }); + + after('stop anvil node', async function () { + this.process.kill(); + }); + + describe('verify', function () { + it('verify transaction and receipt inclusion in block', async function () { + // Multiple transactions/events in a block + const txs = await batchInBlock( + [ + () => this.target.mockFunction({ gasLimit: 100000 }), + () => this.target.mockFunctionWithArgs(0, 1, { gasLimit: 100000 }), + () => this.target.mockFunctionWithArgs(17, 42, { gasLimit: 100000 }), + ], + this.provider, + ); + + // for some reason ethers doesn't expose the transactionsRoot in blocks, so we fetch the block details via RPC instead. + const { transactionsRoot, receiptsRoot } = await this.provider.send('eth_getBlockByNumber', [ + txs.at(0).blockNumber, + false, + ]); + + // Rebuild tries + const transactionTrie = new MerklePatriciaTrie(); + const receiptTrie = new MerklePatriciaTrie(); + + for (const tx of txs) { + const key = ethers.encodeRlp(ethers.stripZerosLeft(ethers.toBeHex(tx.index))); + + // Transaction + const encodedTransaction = await tx.getTransaction().then(tx => ethers.Transaction.from(tx).serialized); + await transactionTrie.put(ethers.getBytes(key), encodedTransaction); + + // Receipt + const encodedReceipt = ethers.concat([ + tx.type === 0 ? '0x' : ethers.toBeHex(tx.type), + ethers.encodeRlp([ + tx.status === 0 ? 
'0x' : '0x01', + ethers.toBeHex(tx.cumulativeGasUsed), + tx.logsBloom, + tx.logs.map(log => [log.address, log.topics, log.data]), + ]), + ]); + await receiptTrie.put(ethers.getBytes(key), encodedReceipt); + + Object.assign(tx, { key, encodedTransaction, encodedReceipt }); + } + + // Sanity check trie roots + expect(ethers.hexlify(transactionTrie.root())).to.equal(transactionsRoot); + expect(ethers.hexlify(receiptTrie.root())).to.equal(receiptsRoot); + + // Verify transaction inclusion in the block's transaction trie + for (const { key, encodedTransaction, encodedReceipt } of txs) { + const transactionProof = await createMerkleProof(transactionTrie, ethers.getBytes(key)); + await expect(this.mock.$verify(encodedTransaction, transactionsRoot, key, transactionProof)).to.eventually.be + .true; + + const receiptProof = await createMerkleProof(receiptTrie, ethers.getBytes(key)); + await expect(this.mock.$verify(encodedReceipt, receiptsRoot, key, receiptProof)).to.eventually.be.true; + } + }); + + describe('processes valid account and storage proofs', function () { + for (const { title, slots } of [ + { + title: 'returns true with proof size 1 (even leaf [0x20])', + slots: { + '0x0000000000000000000000000000000000000000000000000000000000000000': generators.bytes32(), // 0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563 + }, + }, + { + title: 'returns true with proof size 2 (branch then odd leaf [0x3])', + slots: { + '0x0000000000000000000000000000000000000000000000000000000000000000': generators.bytes32(), // 0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563 + '0x0000000000000000000000000000000000000000000000000000000000000001': generators.bytes32(), // 0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6 + }, + }, + { + title: 'returns true with proof size 3 (even extension [0x00], branch then leaf)', + slots: { + '0x0000000000000000000000000000000000000000000000000000000000001889': generators.bytes32(), // 
0xabc4243e220df4927f4d7b432d2d718dadbba652f6cee6a45bb90c077fa4e158 + '0x0000000000000000000000000000000000000000000000000000000000008b23': generators.bytes32(), // 0xabd5ef9a39144905d28bd8554745ebae050359cf7e89079f49b66a6c06bd2bf9 + '0x0000000000000000000000000000000000000000000000000000000000002383': generators.bytes32(), // 0xabe87cb73c1e15a89cfb0daa7fd0cc3eb1a762345fe15d668f5061a4900b22fa + }, + }, + { + title: 'returns true with proof size 3 (odd extension [0x1], branch then leaf)', + slots: { + '0x0000000000000000000000000000000000000000000000000000000000004616': generators.bytes32(), // 0xabcd2ce29d227a0aaaa2ea425df9d5c96a569b416fd0bb7e018b8c9ce9b9d15d + '0x0000000000000000000000000000000000000000000000000000000000012dd3': generators.bytes32(), // 0xabce7718834e2932319fc4642268a27405261f7d3826b19811d044bf2b56ebb1 + '0x000000000000000000000000000000000000000000000000000000000000ce8f': generators.bytes32(), // 0xabcf8b375ce20d03da20a3f5efeb8f3666810beca66f729f995953f51559a4ff + }, + }, + ]) { + it(title, async function () { + // set storage state + const txs = await Promise.all( + Object.entries(slots).map(([slot, value]) => this.storage.setBytes32Slot(slot, value)), + ); + + // get block that contains the latest storage changes + const { stateRoot, number: blockNumber } = await txs.at(-1).getBlock(); + + // build storage proofs for all storage slots (in that block) + const { accountProof, storageHash, storageProof, codeHash } = await this.getProof({ + storageKeys: Object.keys(slots), + blockNumber: ethers.toBeHex(blockNumber), + }); + + // Verify account details in the block's state trie + await expect( + this.mock.$verify( + ethers.encodeRlp([ + '0x01', // nonce + '0x', // balance + storageHash, + codeHash, + ]), + stateRoot, + ethers.keccak256(this.storage.target), + accountProof, + ), + ).to.eventually.be.true; + + // Verify storage proof in the account's storage trie + for (const [[slot, value], { proof, value: proofValue, key }] of 
zip(Object.entries(slots), storageProof)) { + // proof sanity check + expect(sanitizeHexString(proofValue)).to.equal(ethers.stripZerosLeft(value), proofValue); + expect(sanitizeHexString(key)).to.equal(slot, key); + + // verify storage slot + await expect(this.mock.$verify(encodeStorageLeaf(value), storageHash, ethers.keccak256(slot), proof)).to + .eventually.be.true; + } + }); + } + }); + + it('returns false for invalid proof', async function () { + await expect(this.mock.$verify(ZeroBytes, ethers.ZeroHash, '0x', [])).to.eventually.be.false; + }); + }); + + describe('process invalid proofs', function () { + it('fails to process proof with empty key', async function () { + await expect(this.mock.$traverse(ethers.ZeroHash, '0x', [])) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.EMPTY_KEY); + + await expect(this.mock.$tryTraverse(ethers.ZeroHash, '0x', [])).to.eventually.deep.equal([ + ZeroBytes, + ProofError.EMPTY_KEY, + ]); + }); + + it('fails to process proof with invalid root hash', async function () { + const slot = generators.bytes32(); + const value = generators.bytes32(); + await this.storage.setBytes32Slot(slot, value); + const { + storageHash, + storageProof: [{ proof }], + } = await this.getProof({ storageKeys: [slot] }); + + // Correct root hash + await expect(this.mock.$verify(encodeStorageLeaf(value), storageHash, ethers.keccak256(slot), proof)).to + .eventually.be.true; + await expect(this.mock.$traverse(storageHash, ethers.keccak256(slot), proof)).to.eventually.equal( + encodeStorageLeaf(value), + ); + await expect(this.mock.$tryTraverse(storageHash, ethers.keccak256(slot), proof)).to.eventually.deep.equal([ + encodeStorageLeaf(value), + ProofError.NO_ERROR, + ]); + + // Corrupt root hash + const invalidHash = generators.bytes(32); + + await expect(this.mock.$verify(encodeStorageLeaf(value), invalidHash, ethers.keccak256(slot), proof)).to + .eventually.be.false; + await 
expect(this.mock.$traverse(invalidHash, ethers.keccak256(slot), proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.INVALID_ROOT); + await expect(this.mock.$tryTraverse(invalidHash, ethers.keccak256(slot), proof)).to.eventually.deep.equal([ + ZeroBytes, + ProofError.INVALID_ROOT, + ]); + }); + + it('fails to process proof with invalid internal large hash', async function () { + // insert multiple values + const slot = generators.bytes32(); + const value = generators.bytes32(); + await this.storage.setBytes32Slot(slot, value); + await this.storage.setBytes32Slot(generators.bytes32(), generators.bytes32()); + + const { + storageHash, + storageProof: [{ proof }], + } = await this.getProof({ storageKeys: [slot] }); + + // Correct proof + await expect(this.mock.$verify(encodeStorageLeaf(value), storageHash, ethers.keccak256(slot), proof)).to + .eventually.be.true; + await expect(this.mock.$traverse(storageHash, ethers.keccak256(slot), proof)).to.eventually.equal( + encodeStorageLeaf(value), + ); + await expect(this.mock.$tryTraverse(storageHash, ethers.keccak256(slot), proof)).to.eventually.deep.equal([ + encodeStorageLeaf(value), + ProofError.NO_ERROR, + ]); + + // Corrupt proof - replace the value part with a random hash + const [p] = ethers.decodeRlp(proof[1]); + proof[1] = ethers.encodeRlp([p, ethers.encodeRlp(generators.bytes32())]); + + await expect(this.mock.$verify(encodeStorageLeaf(value), storageHash, ethers.keccak256(slot), proof)).to + .eventually.be.false; + await expect(this.mock.$traverse(storageHash, ethers.keccak256(slot), proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.INVALID_LARGE_NODE); + await expect(this.mock.$tryTraverse(storageHash, ethers.keccak256(slot), proof)).to.eventually.deep.equal([ + ZeroBytes, + ProofError.INVALID_LARGE_NODE, + ]); + }); + + it('fails to process proof with invalid internal short node', async function () { + const key = 
'0x00'; + const proof = [ + ethers.encodeRlp(['0x0000', '0x2bad']), // corrupt internal short node + ethers.encodeRlp(['0x2000', '0x']), + ]; + + await expect(this.mock.$traverse(ethers.keccak256(proof[0]), key, proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.INVALID_SHORT_NODE); + await expect(this.mock.$tryTraverse(ethers.keccak256(proof[0]), key, proof)).to.eventually.deep.equal([ + ZeroBytes, + ProofError.INVALID_SHORT_NODE, + ]); + }); + + it('fails to process proof with empty value', async function () { + const key = '0x00'; + const proof = [ethers.encodeRlp(['0x2000', '0x'])]; + + await expect(this.mock.$traverse(ethers.keccak256(proof[0]), key, proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.EMPTY_VALUE); + await expect(this.mock.$tryTraverse(ethers.keccak256(proof[0]), key, proof)).to.eventually.deep.equal([ + ZeroBytes, + ProofError.EMPTY_VALUE, + ]); + }); + + it('fails to process proof with invalid extra proof', async function () { + const key = '0x00'; + const proof = [ + ethers.encodeRlp(['0x2000', '0x']), + ethers.encodeRlp([]), // extra proof element + ]; + + await expect(this.mock.$traverse(ethers.keccak256(proof[0]), key, proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.INVALID_EXTRA_PROOF_ELEMENT); + await expect(this.mock.$tryTraverse(ethers.keccak256(proof[0]), key, proof)).to.eventually.deep.equal([ + ZeroBytes, + ProofError.INVALID_EXTRA_PROOF_ELEMENT, + ]); + }); + + describe('fails to process proof with mismatched leaf path and key remainders', function () { + it('path is not a prefix of key', async function () { + const key = '0xabcd'; + const proof = [ + ethers.encodeRlp([ + '0x02abce', // Prefix.LEAF_EVEN + '0xabce' (not a prefix of 'abcd') + '0x2a', // value + ]), + ]; + + await expect(this.mock.$traverse(ethers.keccak256(proof[0]), key, proof)) + .to.revertedWithCustomError(this.mock, 
'TrieProofTraversalError') + .withArgs(ProofError.INVALID_PATH_REMAINDER); + await expect(this.mock.$tryTraverse(ethers.keccak256(proof[0]), key, proof)).to.eventually.deep.equal([ + ZeroBytes, + ProofError.INVALID_PATH_REMAINDER, + ]); + }); + + it('fails to process proof with empty path', async function () { + const proof = [ + ethers.encodeRlp(['0x', []]), // empty path + ]; + + await expect(this.mock.$traverse(ethers.keccak256(proof[0]), '0x00', proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.EMPTY_PATH); + await expect(this.mock.$tryTraverse(ethers.keccak256(proof[0]), '0x00', proof)).to.eventually.deep.equal([ + ZeroBytes, + ProofError.EMPTY_PATH, + ]); + }); + + it('path is longer than key', async function () { + const key = '0xabcd'; + const proof = [ + ethers.encodeRlp([ + '0x030abcde', // Prefix.LEAF_ODD + '0xabcde' (longer than key 'abcd', so it can never match) + '0x2a', // value + ]), + ]; + + await expect(this.mock.$traverse(ethers.keccak256(proof[0]), key, proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.INVALID_PATH_REMAINDER); + await expect(this.mock.$tryTraverse(ethers.keccak256(proof[0]), key, proof)).to.eventually.deep.equal([ + ZeroBytes, + ProofError.INVALID_PATH_REMAINDER, + ]); + }); + + it('fails to process proof with empty extension path remainder', async function () { + const key = '0x00'; + const node2 = ['0x00', '0x']; + const node1 = [node2].concat(Array(16).fill('0x')); + const node0 = [node1].concat(Array(16).fill('0x')); + const proof = [ethers.encodeRlp(node0), ethers.encodeRlp(node1), ethers.encodeRlp(node2)]; + + await expect(this.mock.$traverse(ethers.keccak256(proof[0]), key, proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.EMPTY_EXTENSION_PATH_REMAINDER); + await expect(this.mock.$tryTraverse(ethers.keccak256(proof[0]), key, proof)).to.eventually.deep.equal([ + ZeroBytes, + 
ProofError.EMPTY_EXTENSION_PATH_REMAINDER, + ]); + }); + + it('key not fully consumed', async function () { + const key = '0xabcd'; + const proof = [ + ethers.encodeRlp([ + '0x3abc', // Prefix.LEAF_ODD + '0xabc' (a prefix of 'abcd' that doesn't consume the d) + '0x2a', // value + ]), + ]; + + await expect(this.mock.$traverse(ethers.keccak256(proof[0]), key, proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.MISMATCH_LEAF_PATH_KEY_REMAINDER); + await expect(this.mock.$tryTraverse(ethers.keccak256(proof[0]), key, proof)).to.eventually.deep.equal([ + ZeroBytes, + ProofError.MISMATCH_LEAF_PATH_KEY_REMAINDER, + ]); + }); + }); + + it('fails to process proof with unknown node prefix', async function () { + const key = '0x00'; + const proof = [ethers.encodeRlp(['0x40', '0x'])]; + + await expect(this.mock.$traverse(ethers.keccak256(proof[0]), key, proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.UNKNOWN_NODE_PREFIX); + await expect(this.mock.$tryTraverse(ethers.keccak256(proof[0]), key, proof)).to.eventually.deep.equal([ + ZeroBytes, + ProofError.UNKNOWN_NODE_PREFIX, + ]); + }); + + it('fails to process proof with unparsable node', async function () { + const key = '0x00'; + const proof = [ethers.encodeRlp(['0x00', '0x00', '0x00'])]; + + await expect(this.mock.$traverse(ethers.keccak256(proof[0]), key, proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.UNPARSEABLE_NODE); + await expect(this.mock.$tryTraverse(ethers.keccak256(proof[0]), key, proof)).to.eventually.deep.equal([ + ZeroBytes, + ProofError.UNPARSEABLE_NODE, + ]); + }); + + it('fails to process proof with invalid proof', async function () { + await expect(this.mock.$traverse(ethers.ZeroHash, '0x00', [])) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(ProofError.INVALID_PROOF); + await expect(this.mock.$tryTraverse(ethers.ZeroHash, 
'0x00', [])).to.eventually.deep.equal([ + ZeroBytes, + ProofError.INVALID_PROOF, + ]); + }); + }); + + // Unit tests from https://github.com/ethereum-optimism/optimism/blob/ef970556e668b271a152124023a8d6bb5159bacf/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol + describe('Optimism contract-bedrock unit tests', function () { + for (const { title, root, key, value, proof, error } of [ + { + title: 'test_get_validProof1_succeeds', + root: '0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f', + key: '0x6b6579326262', + value: '0x6176616c32', + proof: [ + '0xe68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386', + '0xf84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080', + '0xca83206262856176616c32', + ], + }, + { + title: 'test_get_validProof2_succeeds', + root: '0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f', + key: '0x6b6579316161', + value: '0x303132333435363738393031323334353637383930313233343536373839303132333435363738397878', + proof: [ + '0xe68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386', + '0xf84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080', + '0xef83206161aa303132333435363738393031323334353637383930313233343536373839303132333435363738397878', + ], + }, + { + title: 'test_get_validProof3_succeeds', + root: '0xf838216fa749aefa91e0b672a9c06d3e6e983f913d7107b5dab4af60b5f5abed', + key: '0x6b6579316161', + value: '0x303132333435363738393031323334353637383930313233343536373839303132333435363738397878', + proof: [ + '0xf387206b6579316161aa303132333435363738393031323334353637383930313233343536373839303132333435363738397878', + ], + }, + { + title: 'test_get_validProof4_succeeds', + root: '0x37956bab6bba472308146808d5311ac19cb4a7daae5df7efcc0f32badc97f55e', + 
key: '0x6b6579316161', + value: '0x3031323334', + proof: ['0xce87206b6579316161853031323334'], + }, + { + title: 'test_get_validProof5_succeeds', + root: '0xcb65032e2f76c48b82b5c24b3db8f670ce73982869d38cd39a624f23d62a9e89', + key: '0x6b657931', + value: '0x30313233343536373839303132333435363738393031323334353637383930313233343536373839566572795f4c6f6e67', + proof: [ + '0xe68416b65793a0f3f387240403976788281c0a6ee5b3fc08360d276039d635bb824ea7e6fed779', + '0xf87180a034d14ccc7685aa2beb64f78b11ee2a335eae82047ef97c79b7dda7f0732b9f4ca05fb052b64e23d177131d9f32e9c5b942209eb7229e9a07c99a5d93245f53af18a09a137197a43a880648d5887cce656a5e6bbbe5e44ecb4f264395ccaddbe1acca80808080808080808080808080', + '0xf862808080808080a057895fdbd71e2c67c2f9274a56811ff5cf458720a7fa713a135e3890f8cafcf8808080808080808080b130313233343536373839303132333435363738393031323334353637383930313233343536373839566572795f4c6f6e67', + ], + }, + { + title: 'test_get_validProof6_succeeds', + root: '0xcb65032e2f76c48b82b5c24b3db8f670ce73982869d38cd39a624f23d62a9e89', + key: '0x6b657932', + value: '0x73686f7274', + proof: [ + '0xe68416b65793a0f3f387240403976788281c0a6ee5b3fc08360d276039d635bb824ea7e6fed779', + '0xf87180a034d14ccc7685aa2beb64f78b11ee2a335eae82047ef97c79b7dda7f0732b9f4ca05fb052b64e23d177131d9f32e9c5b942209eb7229e9a07c99a5d93245f53af18a09a137197a43a880648d5887cce656a5e6bbbe5e44ecb4f264395ccaddbe1acca80808080808080808080808080', + '0xdf808080808080c9823262856176616c338080808080808080808573686f7274', + ], + }, + { + title: 'test_get_validProof7_succeeds', + root: '0xcb65032e2f76c48b82b5c24b3db8f670ce73982869d38cd39a624f23d62a9e89', + key: '0x6b657933', + value: '0x31323334353637383930313233343536373839303132333435363738393031', + proof: [ + '0xe68416b65793a0f3f387240403976788281c0a6ee5b3fc08360d276039d635bb824ea7e6fed779', + 
'0xf87180a034d14ccc7685aa2beb64f78b11ee2a335eae82047ef97c79b7dda7f0732b9f4ca05fb052b64e23d177131d9f32e9c5b942209eb7229e9a07c99a5d93245f53af18a09a137197a43a880648d5887cce656a5e6bbbe5e44ecb4f264395ccaddbe1acca80808080808080808080808080', + '0xf839808080808080c9823363856176616c338080808080808080809f31323334353637383930313233343536373839303132333435363738393031', + ], + }, + { + title: 'test_get_validProof8_succeeds', + root: '0x72e6c01ad0c9a7b517d4bc68a5b323287fe80f0e68f5415b4b95ecbc8ad83978', + key: '0x61', + value: '0x61', + proof: [ + '0xd916d780c22061c22062c2206380808080808080808080808080', + '0xd780c22061c22062c2206380808080808080808080808080', + '0xc22061', + ], + }, + { + title: 'test_get_validProof9_succeeds', + root: '0x72e6c01ad0c9a7b517d4bc68a5b323287fe80f0e68f5415b4b95ecbc8ad83978', + key: '0x62', + value: '0x62', + proof: [ + '0xd916d780c22061c22062c2206380808080808080808080808080', + '0xd780c22061c22062c2206380808080808080808080808080', + '0xc22062', + ], + }, + { + title: 'test_get_validProof10_succeeds', + root: '0x72e6c01ad0c9a7b517d4bc68a5b323287fe80f0e68f5415b4b95ecbc8ad83978', + key: '0x63', + value: '0x63', + proof: [ + '0xd916d780c22061c22062c2206380808080808080808080808080', + '0xd780c22061c22062c2206380808080808080808080808080', + '0xc22063', + ], + }, + { + title: 'test_get_nonexistentKey1_reverts', + root: '0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f', + key: '0x6b657932', + proof: [ + '0xe68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386', + '0xf84580a0582eed8dd051b823d13f8648cdcd08aa2d8dac239f458863c4620e8c4d605debca83206262856176616c32ca83206363856176616c3380808080808080808080808080', + '0xca83206262856176616c32', + ], + error: ProofError.INVALID_PATH_REMAINDER, + }, + { + title: 'test_get_nonexistentKey2_reverts', + root: '0xd582f99275e227a1cf4284899e5ff06ee56da8859be71b553397c69151bc942f', + key: '0x616e7972616e646f6d6b6579', + proof: 
['0xe68416b65793a03101b4447781f1e6c51ce76c709274fc80bd064f3a58ff981b6015348a826386'], + error: ProofError.INVALID_PATH_REMAINDER, + }, + { + title: 'test_get_wrongKeyProof_reverts', + root: '0x2858eebfa9d96c8a9e6a0cae9d86ec9189127110f132d63f07d3544c2a75a696', + key: '0x6b6579316161', + proof: [ + '0xe216a04892c039d654f1be9af20e88ae53e9ab5fa5520190e0fb2f805823e45ebad22f', + '0xf84780d687206e6f746865728d33343938683472697568677765808080808080808080a0854405b57aa6dc458bc41899a761cbbb1f66a4998af6dd0e8601c1b845395ae38080808080', + '0xd687206e6f746865728d33343938683472697568677765', + ], + error: ProofError.INVALID_SHORT_NODE, + }, + // test_get_corruptedProof_reverts - RLP Encoding + // test_get_invalidDataRemainder_reverts - RLP Encoding + { + title: 'test_get_invalidInternalNodeHash_reverts', + root: '0xa827dff1a657bb9bb9a1c3abe9db173e2f1359f15eb06f1647ea21ac7c95d8fa', + key: '0xaa', + proof: [ + '0xe21aa09862c6b113008c4204c13755693cbb868acc25ebaa98db11df8c89a0c0dd3157', + '0xf380808080808080808080a0de2a9c6a46b6ea71ab9e881c8420570cf19e833c85df6026b04f085016e78f00c220118080808080', + '0xde2a9c6a46b6ea71ab9e881c8420570cf19e833c85df6026b04f085016e78f', + ], + error: ProofError.INVALID_SHORT_NODE, + }, + { + title: 'test_get_zeroBranchValueLength_reverts', + root: '0xe04b3589eef96b237cd49ccb5dcf6e654a47682bfa0961d563ab843f7ad1e035', + key: '0xaa', + proof: [ + '0xdd8200aad98080808080808080808080c43b82aabbc43c82aacc80808080', + '0xd98080808080808080808080c43b82aabbc43c82aacc80808080', + ], + error: ProofError.EMPTY_VALUE, + }, + { + title: 'test_get_zeroLengthKey_reverts', + root: '0x54157fd62cdf2f474e7bfec2d3cd581e807bee38488c9590cb887add98936b73', + key: '0x', + proof: ['0xc78320f00082b443'], + error: ProofError.EMPTY_KEY, + }, + { + title: 'test_get_smallerPathThanKey1_reverts', + root: '0xa513ba530659356fb7588a2c831944e80fd8aedaa5a4dc36f918152be2be0605', + key: '0x01', + proof: [ + '0xdb10d9c32081bbc582202381aa808080808080808080808080808080', + 
'0xd9c32081bbc582202381aa808080808080808080808080808080', + '0xc582202381aa', + ], + error: ProofError.INVALID_PATH_REMAINDER, + }, + { + title: 'test_get_smallerPathThanKey2_reverts', + root: '0xa06abffaec4ebe8ccde595f4547b864b4421b21c1fc699973f94710c9bc17979', + key: '0xaa', + proof: [ + '0xe21aa07ea462226a3dc0a46afb4ded39306d7a84d311ada3557dfc75a909fd25530905', + '0xf380808080808080808080a027f11bd3af96d137b9287632f44dd00fea1ca1bd70386c30985ede8cc287476e808080c220338080', + '0xe48200bba0a6911545ed01c2d3f4e15b8b27c7bfba97738bd5e6dd674dd07033428a4c53af', + ], + error: ProofError.INVALID_PATH_REMAINDER, + }, + { + title: 'test_get_extraProofElements_reverts', + root: '0x278c88eb59beba4f8b94f940c41614bb0dd80c305859ebffcd6ce07c93ca3749', + key: '0xaa', + proof: [ + '0xd91ad780808080808080808080c32081aac32081ab8080808080', + '0xd780808080808080808080c32081aac32081ab8080808080', + '0xc32081aa', + '0xc32081aa', + ], + error: ProofError.INVALID_EXTRA_PROOF_ELEMENT, + }, + ]) { + it(title, async function () { + if (error === undefined) { + await expect(this.mock.$traverse(root, key, proof)).to.eventually.equal(value); + } else { + await expect(this.mock.$traverse(root, key, proof)) + .to.revertedWithCustomError(this.mock, 'TrieProofTraversalError') + .withArgs(error); + } + + await expect(this.mock.$tryTraverse(root, key, proof)).to.eventually.deep.equal([ + value ?? ZeroBytes, + error ?? ProofError.NO_ERROR, + ]); + }); + } + }); +});