diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 574f1eda791..817fd9524df 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -24,8 +24,6 @@ env: LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.LIGHTHOUSE_GITHUB_TOKEN }} # Enable self-hosted runners for the sigp repo only. SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} - # Self-hosted runners need to reference a different host for `./watch` tests. - WATCH_HOST: ${{ github.repository == 'sigp/lighthouse' && 'host.docker.internal' || 'localhost' }} # Disable incremental compilation CARGO_INCREMENTAL: 0 # Enable portable to prevent issues with caching `blst` for the wrong CPU type diff --git a/Cargo.lock b/Cargo.lock index c51e3583d02..3a63b6cdf2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -685,61 +685,6 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" -[[package]] -name = "axum" -version = "0.7.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" -dependencies = [ - "async-trait", - "axum-core", - "bytes", - "futures-util", - "http 1.2.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.6.0", - "hyper-util", - "itoa", - "matchit", - "memchr", - "mime", - "percent-encoding", - "pin-project-lite", - "rustversion", - "serde", - "serde_json", - "serde_path_to_error", - "serde_urlencoded", - "sync_wrapper 1.0.2", - "tokio", - "tower", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "axum-core" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" -dependencies = [ - "async-trait", - "bytes", - "futures-util", - "http 1.2.0", - "http-body 1.0.1", - "http-body-util", - "mime", - "pin-project-lite", - "rustversion", - "sync_wrapper 1.0.2", - "tower-layer", - "tower-service", - "tracing", -] - [[package]] name = "backtrace" version = "0.3.74" @@ -1096,16 +1041,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "bollard-stubs" -version = "1.42.0-rc.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed59b5c00048f48d7af971b71f800fdf23e858844a6f9e4d32ca72e9399e7864" -dependencies = [ - "serde", - "serde_with", -] - [[package]] name = "boot_node" version = "7.0.0-beta.0" @@ -2114,53 +2049,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "diesel" -version = "2.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04001f23ba8843dc315804fa324000376084dfb1c30794ff68dd279e6e5696d5" -dependencies = [ - "bitflags 2.8.0", - "byteorder", - "diesel_derives", - "itoa", - "pq-sys", - "r2d2", -] - -[[package]] -name = "diesel_derives" -version = "2.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f2c3de51e2ba6bf2a648285696137aaf0f5f487bcbea93972fe8a364e131a4" -dependencies = [ - "diesel_table_macro_syntax", - "dsl_auto_type", - "proc-macro2", - "quote", - "syn 2.0.98", -] - -[[package]] -name = "diesel_migrations" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a73ce704bad4231f001bff3314d91dce4aba0770cee8b233991859abc15c1f6" -dependencies = [ - "diesel", - 
"migrations_internals", - "migrations_macros", -] - -[[package]] -name = "diesel_table_macro_syntax" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" -dependencies = [ - "syn 2.0.98", -] - [[package]] name = "digest" version = "0.9.0" @@ -2293,20 +2181,6 @@ dependencies = [ "types", ] -[[package]] -name = "dsl_auto_type" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139ae9aca7527f85f26dd76483eb38533fd84bd571065da1739656ef71c5ff5b" -dependencies = [ - "darling 0.20.10", - "either", - "heck 0.5.0", - "proc-macro2", - "quote", - "syn 2.0.98", -] - [[package]] name = "dtoa" version = "1.0.9" @@ -2907,7 +2781,7 @@ dependencies = [ "serde", "serde_json", "syn 1.0.109", - "toml 0.5.11", + "toml", "url", "walkdir", ] @@ -5647,22 +5521,6 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" -[[package]] -name = "matchit" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" - -[[package]] -name = "md-5" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" -dependencies = [ - "cfg-if", - "digest 0.10.7", -] - [[package]] name = "mdbx-sys" version = "0.11.6-4" @@ -5737,27 +5595,6 @@ dependencies = [ "prometheus", ] -[[package]] -name = "migrations_internals" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd01039851e82f8799046eabbb354056283fb265c8ec0996af940f4e85a380ff" -dependencies = [ - "serde", - "toml 0.8.19", -] - -[[package]] -name = "migrations_macros" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb161cc72176cb37aa47f1fc520d3ef02263d67d661f44f05d05a079e1237fd" -dependencies = [ - "migrations_internals", - "proc-macro2", - "quote", -] - [[package]] name = "milhouse" version = "0.3.0" @@ -6629,24 +6466,6 @@ dependencies = [ "rustc_version 0.4.1", ] -[[package]] -name = "phf" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" -dependencies = [ - "phf_shared", -] - -[[package]] -name = "phf_shared" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" -dependencies = [ - "siphasher", -] - [[package]] name = "pin-project" version = "1.1.8" @@ -6783,35 +6602,6 @@ version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" -[[package]] -name = "postgres-protocol" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76ff0abab4a9b844b93ef7b81f1efc0a366062aaef2cd702c76256b5dc075c54" -dependencies = [ - "base64 0.22.1", - "byteorder", - "bytes", - "fallible-iterator", - "hmac 0.12.1", - "md-5", - "memchr", - "rand 0.9.0", - "sha2 0.10.8", - "stringprep", -] - 
-[[package]] -name = "postgres-types" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613283563cd90e1dfc3518d548caee47e0e725455ed619881f5cf21f36de4b48" -dependencies = [ - "bytes", - "fallible-iterator", - "postgres-protocol", -] - [[package]] name = "powerfmt" version = "0.2.0" @@ -6827,16 +6617,6 @@ dependencies = [ "zerocopy 0.7.35", ] -[[package]] -name = "pq-sys" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b51d65ebe1cb1f40641b15abae017fed35ccdda46e3dab1ff8768f625a3222" -dependencies = [ - "libc", - "vcpkg", -] - [[package]] name = "pretty_reqwest_error" version = "0.1.0" @@ -7400,7 +7180,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 0.1.2", + "sync_wrapper", "system-configuration 0.5.1", "tokio", "tokio-native-tls", @@ -8057,16 +7837,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_path_to_error" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" -dependencies = [ - "itoa", - "serde", -] - [[package]] name = "serde_repr" version = "0.1.19" @@ -8078,15 +7848,6 @@ dependencies = [ "syn 2.0.98", ] -[[package]] -name = "serde_spanned" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" -dependencies = [ - "serde", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -8099,28 +7860,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_with" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" -dependencies = [ - "serde", - "serde_with_macros", -] - -[[package]] -name = "serde_with_macros" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" -dependencies = [ - "darling 0.13.4", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "serde_yaml" version = "0.9.34+deprecated" @@ -8298,12 +8037,6 @@ dependencies = [ "types", ] -[[package]] -name = "siphasher" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" - [[package]] name = "slab" version = "0.4.9" @@ -8667,17 +8400,6 @@ dependencies = [ "zstd 0.13.2", ] -[[package]] -name = "stringprep" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" -dependencies = [ - "unicode-bidi", - "unicode-normalization", - "unicode-properties", -] - [[package]] name = "strsim" version = "0.10.0" @@ -8770,12 +8492,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" -[[package]] -name = "sync_wrapper" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" - [[package]] name = "synstructure" version = "0.13.1" @@ -8960,23 +8676,6 @@ 
dependencies = [ "syn 1.0.109", ] -[[package]] -name = "testcontainers" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83d2931d7f521af5bae989f716c3fa43a6af9af7ec7a5e21b59ae40878cec00" -dependencies = [ - "bollard-stubs", - "futures", - "hex", - "hmac 0.12.1", - "log", - "rand 0.8.5", - "serde", - "serde_json", - "sha2 0.10.8", -] - [[package]] name = "thiserror" version = "1.0.69" @@ -9221,32 +8920,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-postgres" -version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c95d533c83082bb6490e0189acaa0bbeef9084e60471b696ca6988cd0541fb0" -dependencies = [ - "async-trait", - "byteorder", - "bytes", - "fallible-iterator", - "futures-channel", - "futures-util", - "log", - "parking_lot 0.12.3", - "percent-encoding", - "phf", - "pin-project-lite", - "postgres-protocol", - "postgres-types", - "rand 0.9.0", - "socket2", - "tokio", - "tokio-util", - "whoami", -] - [[package]] name = "tokio-rustls" version = "0.24.1" @@ -9304,26 +8977,11 @@ dependencies = [ "serde", ] -[[package]] -name = "toml" -version = "0.8.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.22.23", -] - [[package]] name = "toml_datetime" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" -dependencies = [ - "serde", -] [[package]] name = "toml_edit" @@ -9343,34 +9001,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" dependencies = [ "indexmap 2.7.1", - "serde", - "serde_spanned", "toml_datetime", "winnow 0.7.0", ] -[[package]] -name = "tower" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" -dependencies = [ - "futures-core", - "futures-util", - "pin-project-lite", - "sync_wrapper 1.0.2", - "tokio", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-layer" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" - [[package]] name = "tower-service" version = "0.3.3" @@ -9636,12 +9270,6 @@ version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" -[[package]] -name = "unicode-bidi" -version = "0.3.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" - [[package]] name = "unicode-ident" version = "1.0.16" @@ -9657,12 +9285,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-properties" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" - [[package]] name = "unicode-xid" version = "0.2.6" @@ -10094,12 +9716,6 @@ dependencies = [ "wit-bindgen-rt", ] -[[package]] -name = 
"wasite" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" - [[package]] name = "wasm-bindgen" version = "0.2.100" @@ -10199,40 +9815,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "watch" -version = "0.1.0" -dependencies = [ - "axum", - "beacon_chain", - "beacon_node", - "bls", - "clap", - "clap_utils", - "diesel", - "diesel_migrations", - "env_logger 0.9.3", - "eth2", - "http_api", - "hyper 1.6.0", - "log", - "logging", - "network", - "r2d2", - "rand 0.8.5", - "reqwest", - "serde", - "serde_json", - "serde_yaml", - "task_executor", - "testcontainers", - "tokio", - "tokio-postgres", - "types", - "unused_port", - "url", -] - [[package]] name = "web-sys" version = "0.3.77" @@ -10299,17 +9881,6 @@ dependencies = [ "rustix 0.38.44", ] -[[package]] -name = "whoami" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" -dependencies = [ - "redox_syscall 0.5.8", - "wasite", - "web-sys", -] - [[package]] name = "widestring" version = "0.4.3" diff --git a/Cargo.toml b/Cargo.toml index 73912f60822..54729a60a63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,8 +104,6 @@ members = [ "validator_client/validator_store", "validator_manager", - - "watch", ] resolver = "2" diff --git a/book/src/setup.md b/book/src/setup.md index d3da68f97cc..7143c8f0fb0 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -17,9 +17,6 @@ The additional requirements for developers are: some dependencies. See [`Installation Guide`](./installation.md) for more info. - [`java 17 runtime`](https://openjdk.java.net/projects/jdk/). 17 is the minimum, used by web3signer_tests. -- [`libpq-dev`](https://www.postgresql.org/docs/devel/libpq.html). Also known as - `libpq-devel` on some systems. -- [`docker`](https://www.docker.com/). Some tests need docker installed and **running**. 
## Using `make` diff --git a/watch/.gitignore b/watch/.gitignore deleted file mode 100644 index 5b6b0720c9e..00000000000 --- a/watch/.gitignore +++ /dev/null @@ -1 +0,0 @@ -config.yaml diff --git a/watch/Cargo.toml b/watch/Cargo.toml deleted file mode 100644 index 41cfb58e287..00000000000 --- a/watch/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -name = "watch" -version = "0.1.0" -edition = { workspace = true } - -[lib] -name = "watch" -path = "src/lib.rs" - -[[bin]] -name = "watch" -path = "src/main.rs" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -axum = "0.7" -beacon_node = { workspace = true } -bls = { workspace = true } -clap = { workspace = true } -clap_utils = { workspace = true } -diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } -diesel_migrations = { version = "2.0.0", features = ["postgres"] } -env_logger = { workspace = true } -eth2 = { workspace = true } -hyper = { workspace = true } -log = { workspace = true } -r2d2 = { workspace = true } -rand = { workspace = true } -reqwest = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -serde_yaml = { workspace = true } -tokio = { workspace = true } -types = { workspace = true } -url = { workspace = true } - -[dev-dependencies] -beacon_chain = { workspace = true } -http_api = { workspace = true } -logging = { workspace = true } -network = { workspace = true } -task_executor = { workspace = true } -testcontainers = "0.15" -tokio-postgres = "0.7.5" -unused_port = { workspace = true } diff --git a/watch/README.md b/watch/README.md deleted file mode 100644 index 877cddf2346..00000000000 --- a/watch/README.md +++ /dev/null @@ -1,458 +0,0 @@ -## beacon.watch - ->beacon.watch is pre-MVP and still under active development and subject to change. - -beacon.watch is an Ethereum Beacon Chain monitoring platform whose goal is to provide fast access to -data which is: -1. Not already stored natively in the Beacon Chain -2. Too specialized for Block Explorers -3. Too sensitive for public Block Explorers - - -### Requirements -- `git` -- `rust` : https://rustup.rs/ -- `libpq` : https://www.postgresql.org/download/ -- `diesel_cli` : -``` -cargo install diesel_cli --no-default-features --features postgres -``` -- `docker` : https://docs.docker.com/engine/install/ -- `docker-compose` : https://docs.docker.com/compose/install/ - -### Setup -1. Setup the database: -``` -cd postgres_docker_compose -docker-compose up -``` - -1. Ensure the tests pass: -``` -cargo test --release -``` - -1. Drop the database (if it already exists) and run the required migrations: -``` -diesel database reset --database-url postgres://postgres:postgres@localhost/dev -``` - -1. Ensure a synced Lighthouse beacon node with historical states is available -at `localhost:5052`. - -1. Run the updater daemon: -``` -cargo run --release -- run-updater -``` - -1. Start the HTTP API server: -``` -cargo run --release -- serve -``` - -1. Ensure connectivity: -``` -curl "http://localhost:5059/v1/slots/highest" -``` - -> Functionality on MacOS has not been tested. Windows is not supported. - - -### Configuration -beacon.watch can be configured through the use of a config file. -Available options can be seen in `config.yaml.default`. - -You can specify a config file during runtime: -``` -cargo run -- run-updater --config path/to/config.yaml -cargo run -- serve --config path/to/config.yaml -``` - -You can specify only the parts of the config file which you need changed. 
-Missing values will remain as their defaults. - -For example, if you wish to run with default settings but only wish to alter `log_level` -your config file would be: -```yaml -# config.yaml -log_level = "info" -``` - -### Available Endpoints -As beacon.watch continues to develop, more endpoints will be added. - -> In these examples any data containing information from blockprint has either been redacted or fabricated. - -#### `/v1/slots/{slot}` -```bash -curl "http://localhost:5059/v1/slots/4635296" -``` -```json -{ - "slot": "4635296", - "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", - "skipped": false, - "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" -} -``` - -#### `/v1/slots?start_slot={}&end_slot={}` -```bash -curl "http://localhost:5059/v1/slots?start_slot=4635296&end_slot=4635297" -``` -```json -[ - { - "slot": "4635297", - "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", - "skipped": false, - "beacon_block": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182" - }, - { - "slot": "4635296", - "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", - "skipped": false, - "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" - } -] -``` - -#### `/v1/slots/lowest` -```bash -curl "http://localhost:5059/v1/slots/lowest" -``` -```json -{ - "slot": "4635296", - "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", - "skipped": false, - "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" -} -``` - -#### `/v1/slots/highest` -```bash -curl "http://localhost:5059/v1/slots/highest" -``` -```json -{ - "slot": "4635358", - "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b", - "skipped": false, - "beacon_block": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b" -} -``` - -#### `v1/slots/{slot}/block` -```bash -curl "http://localhost:5059/v1/slots/4635296/block" -``` -```json -{ - "slot": "4635296", - "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", - "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" -} -``` - -#### `/v1/blocks/{block_id}` -```bash -curl "http://localhost:5059/v1/blocks/4635296" -# OR -curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" -``` -```json -{ - "slot": "4635296", - "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", - "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" -} -``` - -#### `/v1/blocks?start_slot={}&end_slot={}` -```bash -curl "http://localhost:5059/v1/blocks?start_slot=4635296&end_slot=4635297" -``` -```json -[ - { - "slot": "4635297", - "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", - "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" - }, - { - "slot": "4635296", - "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", - "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" - } -] -``` - -#### `/v1/blocks/{block_id}/previous` -```bash -curl "http://localhost:5059/v1/blocks/4635297/previous" -# OR -curl "http://localhost:5059/v1/blocks/0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182/previous" -``` -```json -{ - "slot": "4635296", - "root": 
"0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", - "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" -} -``` - -#### `/v1/blocks/{block_id}/next` -```bash -curl "http://localhost:5059/v1/blocks/4635296/next" -# OR -curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/next" -``` -```json -{ - "slot": "4635297", - "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", - "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" -} -``` - -#### `/v1/blocks/lowest` -```bash -curl "http://localhost:5059/v1/blocks/lowest" -``` -```json -{ - "slot": "4635296", - "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", - "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" -} -``` - -#### `/v1/blocks/highest` -```bash -curl "http://localhost:5059/v1/blocks/highest" -``` -```json -{ - "slot": "4635358", - "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b", - "parent_root": "0xb66e05418bb5b1d4a965c994e1f0e5b5f0d7b780e0df12f3f6321510654fa1d2" -} -``` - -#### `/v1/blocks/{block_id}/proposer` -```bash -curl "http://localhost:5059/v1/blocks/4635296/proposer" -# OR -curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/proposer" - -``` -```json -{ - "slot": "4635296", - "proposer_index": 223126, - "graffiti": "" -} -``` - -#### `/v1/blocks/{block_id}/rewards` -```bash -curl "http://localhost:5059/v1/blocks/4635296/reward" -# OR -curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/reward" - -``` -```json -{ - "slot": "4635296", - "total": 25380059, - "attestation_reward": 24351867, - "sync_committee_reward": 1028192 -} -``` - -#### `/v1/blocks/{block_id}/packing` -```bash -curl "http://localhost:5059/v1/blocks/4635296/packing" -# OR -curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/packing" - -``` -```json -{ - "slot": "4635296", - "available": 16152, - "included": 13101, - "prior_skip_slots": 0 -} -``` - -#### `/v1/validators/{validator}` -```bash -curl "http://localhost:5059/v1/validators/1" -# OR -curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c" -``` -```json -{ - "index": 1, - "public_key": "0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c", - "status": "active_ongoing", - "client": null, - "activation_epoch": 0, - "exit_epoch": null -} -``` - -#### `/v1/validators/{validator}/attestation/{epoch}` -```bash -curl "http://localhost:5059/v1/validators/1/attestation/144853" -# OR -curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c/attestation/144853" -``` -```json -{ - "index": 1, - "epoch": "144853", - "source": true, - "head": true, - "target": true -} -``` - -#### `/v1/validators/missed/{vote}/{epoch}` -```bash -curl "http://localhost:5059/v1/validators/missed/head/144853" -``` -```json -[ - 63, - 67, - 98, - ... -] -``` - -#### `/v1/validators/missed/{vote}/{epoch}/graffiti` -```bash -curl "http://localhost:5059/v1/validators/missed/head/144853/graffiti" -``` -```json -{ - "Mr F was here": 3, - "Lighthouse/v3.1.0-aa022f4": 5, - ... 
-} -``` - -#### `/v1/clients/missed/{vote}/{epoch}` -```bash -curl "http://localhost:5059/v1/clients/missed/source/144853" -``` -```json -{ - "Lighthouse": 100, - "Lodestar": 100, - "Nimbus": 100, - "Prysm": 100, - "Teku": 100, - "Unknown": 100 -} -``` - -#### `/v1/clients/missed/{vote}/{epoch}/percentages` -Note that this endpoint expresses the following: -``` -What percentage of each client implementation missed this vote? -``` - -```bash -curl "http://localhost:5059/v1/clients/missed/target/144853/percentages" -``` -```json -{ - "Lighthouse": 0.51234567890, - "Lodestar": 0.51234567890, - "Nimbus": 0.51234567890, - "Prysm": 0.09876543210, - "Teku": 0.09876543210, - "Unknown": 0.05647382910 -} -``` - -#### `/v1/clients/missed/{vote}/{epoch}/percentages/relative` -Note that this endpoint expresses the following: -``` -For the validators which did miss this vote, what percentage of them were from each client implementation? -``` -You can check these values against the output of `/v1/clients/percentages` to see any discrepancies. - -```bash -curl "http://localhost:5059/v1/clients/missed/target/144853/percentages/relative" -``` -```json -{ - "Lighthouse": 11.11111111111111, - "Lodestar": 11.11111111111111, - "Nimbus": 11.11111111111111, - "Prysm": 16.66666666666667, - "Teku": 16.66666666666667, - "Unknown": 33.33333333333333 -} - -``` - -#### `/v1/clients` -```bash -curl "http://localhost:5059/v1/clients" -``` -```json -{ - "Lighthouse": 5000, - "Lodestar": 5000, - "Nimbus": 5000, - "Prysm": 5000, - "Teku": 5000, - "Unknown": 5000 -} -``` - -#### `/v1/clients/percentages` -```bash -curl "http://localhost:5059/v1/clients/percentages" -``` -```json -{ - "Lighthouse": 16.66666666666667, - "Lodestar": 16.66666666666667, - "Nimbus": 16.66666666666667, - "Prysm": 16.66666666666667, - "Teku": 16.66666666666667, - "Unknown": 16.66666666666667 -} -``` - -### Future work -- New tables - - `skip_slots`? - - -- More API endpoints - - `/v1/proposers?start_epoch={}&end_epoch={}` and similar - - `/v1/validators/{status}/count` - - -- Concurrently backfill and forwards fill, so forwards fill is not bottlenecked by large backfills. - - -- Better/prettier (async?) logging. - - -- Connect to a range of beacon_nodes to sync different components concurrently. -Generally, processing certain api queries such as `block_packing` and `attestation_performance` take the longest to sync. - - -### Architecture -Connection Pooling: -- 1 Pool for Updater (read and write) -- 1 Pool for HTTP Server (should be read only, although not sure if we can enforce this) diff --git a/watch/config.yaml.default b/watch/config.yaml.default deleted file mode 100644 index 131609237cb..00000000000 --- a/watch/config.yaml.default +++ /dev/null @@ -1,49 +0,0 @@ ---- -database: - user: "postgres" - password: "postgres" - dbname: "dev" - default_dbname: "postgres" - host: "localhost" - port: 5432 - connect_timeout_millis: 2000 - -server: - listen_addr: "127.0.0.1" - listen_port: 5059 - -updater: - # The URL of the Beacon Node to perform sync tasks with. - # Cannot yet accept multiple beacon nodes. - beacon_node_url: "http://localhost:5052" - # The number of epochs to backfill. Must be below 100. - max_backfill_size_epochs: 2 - # The epoch at which to stop backfilling. - backfill_stop_epoch: 0 - # Whether to sync the attestations table. - attestations: true - # Whether to sync the proposer_info table. - proposer_info: true - # Whether to sync the block_rewards table. - block_rewards: true - # Whether to sync the block_packing table. 
- block_packing: true - -blockprint: - # Whether to sync client information from blockprint. - enabled: false - # The URL of the blockprint server. - url: "" - # The username used to authenticate to the blockprint server. - username: "" - # The password used to authenticate to the blockprint server. - password: "" - -# Log level. -# Valid options are: -# - "trace" -# - "debug" -# - "info" -# - "warn" -# - "error" -log_level: "debug" diff --git a/watch/diesel.toml b/watch/diesel.toml deleted file mode 100644 index bfb01bccf0f..00000000000 --- a/watch/diesel.toml +++ /dev/null @@ -1,5 +0,0 @@ -# For documentation on how to configure this file, -# see diesel.rs/guides/configuring-diesel-cli - -[print_schema] -file = "src/database/schema.rs" diff --git a/watch/migrations/.gitkeep b/watch/migrations/.gitkeep deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/watch/migrations/00000000000000_diesel_initial_setup/down.sql b/watch/migrations/00000000000000_diesel_initial_setup/down.sql deleted file mode 100644 index a9f52609119..00000000000 --- a/watch/migrations/00000000000000_diesel_initial_setup/down.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file was automatically created by Diesel to setup helper functions --- and other internal bookkeeping. This file is safe to edit, any future --- changes will be added to existing projects as new migrations. - -DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); -DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/watch/migrations/00000000000000_diesel_initial_setup/up.sql b/watch/migrations/00000000000000_diesel_initial_setup/up.sql deleted file mode 100644 index d68895b1a7b..00000000000 --- a/watch/migrations/00000000000000_diesel_initial_setup/up.sql +++ /dev/null @@ -1,36 +0,0 @@ --- This file was automatically created by Diesel to setup helper functions --- and other internal bookkeeping. This file is safe to edit, any future --- changes will be added to existing projects as new migrations. 
- - - - --- Sets up a trigger for the given table to automatically set a column called --- `updated_at` whenever the row is modified (unless `updated_at` was included --- in the modified columns) --- --- # Example --- --- ```sql --- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); --- --- SELECT diesel_manage_updated_at('users'); --- ``` -CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ -BEGIN - EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); -END; -$$ LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ -BEGIN - IF ( - NEW IS DISTINCT FROM OLD AND - NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at - ) THEN - NEW.updated_at := current_timestamp; - END IF; - RETURN NEW; -END; -$$ LANGUAGE plpgsql; diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/down.sql b/watch/migrations/2022-01-01-000000_canonical_slots/down.sql deleted file mode 100644 index 551ed6605c7..00000000000 --- a/watch/migrations/2022-01-01-000000_canonical_slots/down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE canonical_slots diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/up.sql b/watch/migrations/2022-01-01-000000_canonical_slots/up.sql deleted file mode 100644 index 2629f11a4c7..00000000000 --- a/watch/migrations/2022-01-01-000000_canonical_slots/up.sql +++ /dev/null @@ -1,6 +0,0 @@ -CREATE TABLE canonical_slots ( - slot integer PRIMARY KEY, - root bytea NOT NULL, - skipped boolean NOT NULL, - beacon_block bytea UNIQUE -) diff --git a/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql deleted file mode 100644 index 8901956f47c..00000000000 --- a/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE beacon_blocks diff --git a/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql deleted file mode 100644 index 250c667b232..00000000000 --- a/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql +++ /dev/null @@ -1,7 +0,0 @@ -CREATE TABLE beacon_blocks ( - slot integer PRIMARY KEY REFERENCES canonical_slots(slot) ON DELETE CASCADE, - root bytea REFERENCES canonical_slots(beacon_block) NOT NULL, - parent_root bytea NOT NULL, - attestation_count integer NOT NULL, - transaction_count integer -) diff --git a/watch/migrations/2022-01-01-000002_validators/down.sql b/watch/migrations/2022-01-01-000002_validators/down.sql deleted file mode 100644 index 17819fc3491..00000000000 --- a/watch/migrations/2022-01-01-000002_validators/down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE validators diff --git a/watch/migrations/2022-01-01-000002_validators/up.sql b/watch/migrations/2022-01-01-000002_validators/up.sql deleted file mode 100644 index 69cfef6772b..00000000000 --- a/watch/migrations/2022-01-01-000002_validators/up.sql +++ /dev/null @@ -1,7 +0,0 @@ -CREATE TABLE validators ( - index integer PRIMARY KEY, - public_key bytea NOT NULL, - status text NOT NULL, - activation_epoch integer, - exit_epoch integer -) diff --git a/watch/migrations/2022-01-01-000003_proposer_info/down.sql b/watch/migrations/2022-01-01-000003_proposer_info/down.sql deleted file mode 100644 index d61330be5b2..00000000000 --- a/watch/migrations/2022-01-01-000003_proposer_info/down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE proposer_info diff --git 
a/watch/migrations/2022-01-01-000003_proposer_info/up.sql b/watch/migrations/2022-01-01-000003_proposer_info/up.sql deleted file mode 100644 index 488aedb2730..00000000000 --- a/watch/migrations/2022-01-01-000003_proposer_info/up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE proposer_info ( - slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, - proposer_index integer REFERENCES validators(index) ON DELETE CASCADE NOT NULL, - graffiti text NOT NULL -) diff --git a/watch/migrations/2022-01-01-000004_active_config/down.sql b/watch/migrations/2022-01-01-000004_active_config/down.sql deleted file mode 100644 index b4304eb7b72..00000000000 --- a/watch/migrations/2022-01-01-000004_active_config/down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE active_config diff --git a/watch/migrations/2022-01-01-000004_active_config/up.sql b/watch/migrations/2022-01-01-000004_active_config/up.sql deleted file mode 100644 index 476a0911607..00000000000 --- a/watch/migrations/2022-01-01-000004_active_config/up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE active_config ( - id integer PRIMARY KEY CHECK (id=1), - config_name text NOT NULL, - slots_per_epoch integer NOT NULL -) diff --git a/watch/migrations/2022-01-01-000010_blockprint/down.sql b/watch/migrations/2022-01-01-000010_blockprint/down.sql deleted file mode 100644 index fa53325dad1..00000000000 --- a/watch/migrations/2022-01-01-000010_blockprint/down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE blockprint diff --git a/watch/migrations/2022-01-01-000010_blockprint/up.sql b/watch/migrations/2022-01-01-000010_blockprint/up.sql deleted file mode 100644 index 2d5741f50b7..00000000000 --- a/watch/migrations/2022-01-01-000010_blockprint/up.sql +++ /dev/null @@ -1,4 +0,0 @@ -CREATE TABLE blockprint ( - slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, - best_guess text NOT NULL -) diff --git a/watch/migrations/2022-01-01-000011_block_rewards/down.sql b/watch/migrations/2022-01-01-000011_block_rewards/down.sql deleted file mode 100644 index 2dc87995c74..00000000000 --- a/watch/migrations/2022-01-01-000011_block_rewards/down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE block_rewards diff --git a/watch/migrations/2022-01-01-000011_block_rewards/up.sql b/watch/migrations/2022-01-01-000011_block_rewards/up.sql deleted file mode 100644 index 47cb4304f06..00000000000 --- a/watch/migrations/2022-01-01-000011_block_rewards/up.sql +++ /dev/null @@ -1,6 +0,0 @@ -CREATE TABLE block_rewards ( - slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, - total integer NOT NULL, - attestation_reward integer NOT NULL, - sync_committee_reward integer NOT NULL -) diff --git a/watch/migrations/2022-01-01-000012_block_packing/down.sql b/watch/migrations/2022-01-01-000012_block_packing/down.sql deleted file mode 100644 index e9e7755e3e0..00000000000 --- a/watch/migrations/2022-01-01-000012_block_packing/down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE block_packing diff --git a/watch/migrations/2022-01-01-000012_block_packing/up.sql b/watch/migrations/2022-01-01-000012_block_packing/up.sql deleted file mode 100644 index 63a9925f920..00000000000 --- a/watch/migrations/2022-01-01-000012_block_packing/up.sql +++ /dev/null @@ -1,6 +0,0 @@ -CREATE TABLE block_packing ( - slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, - available integer NOT NULL, - included integer NOT NULL, - prior_skip_slots integer NOT NULL -) diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql 
b/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql deleted file mode 100644 index 0f32b6b4f33..00000000000 --- a/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE suboptimal_attestations diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql b/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql deleted file mode 100644 index 5352afefc8d..00000000000 --- a/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql +++ /dev/null @@ -1,8 +0,0 @@ -CREATE TABLE suboptimal_attestations ( - epoch_start_slot integer CHECK (epoch_start_slot % 32 = 0) REFERENCES canonical_slots(slot) ON DELETE CASCADE, - index integer NOT NULL REFERENCES validators(index) ON DELETE CASCADE, - source boolean NOT NULL, - head boolean NOT NULL, - target boolean NOT NULL, - PRIMARY KEY(epoch_start_slot, index) -) diff --git a/watch/migrations/2022-01-01-000020_capella/down.sql b/watch/migrations/2022-01-01-000020_capella/down.sql deleted file mode 100644 index 5903b351db9..00000000000 --- a/watch/migrations/2022-01-01-000020_capella/down.sql +++ /dev/null @@ -1,2 +0,0 @@ -ALTER TABLE beacon_blocks -DROP COLUMN withdrawal_count; diff --git a/watch/migrations/2022-01-01-000020_capella/up.sql b/watch/migrations/2022-01-01-000020_capella/up.sql deleted file mode 100644 index b52b4b00998..00000000000 --- a/watch/migrations/2022-01-01-000020_capella/up.sql +++ /dev/null @@ -1,3 +0,0 @@ -ALTER TABLE beacon_blocks -ADD COLUMN withdrawal_count integer; - diff --git a/watch/postgres_docker_compose/compose.yml b/watch/postgres_docker_compose/compose.yml deleted file mode 100644 index eae4de4a2ba..00000000000 --- a/watch/postgres_docker_compose/compose.yml +++ /dev/null @@ -1,16 +0,0 @@ -version: "3" - -services: - postgres: - image: postgres:12.3-alpine - restart: always - environment: - POSTGRES_PASSWORD: postgres - POSTGRES_USER: postgres - volumes: - - postgres:/var/lib/postgresql/data - ports: - - 127.0.0.1:5432:5432 - -volumes: - postgres: diff --git a/watch/src/block_packing/database.rs b/watch/src/block_packing/database.rs deleted file mode 100644 index f7375431cb3..00000000000 --- a/watch/src/block_packing/database.rs +++ /dev/null @@ -1,140 +0,0 @@ -use crate::database::{ - schema::{beacon_blocks, block_packing}, - watch_types::{WatchHash, WatchSlot}, - Error, PgConn, MAX_SIZE_BATCH_INSERT, -}; - -use diesel::prelude::*; -use diesel::{Insertable, Queryable}; -use log::debug; -use serde::{Deserialize, Serialize}; -use std::time::Instant; - -#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] -#[diesel(table_name = block_packing)] -pub struct WatchBlockPacking { - pub slot: WatchSlot, - pub available: i32, - pub included: i32, - pub prior_skip_slots: i32, -} - -/// Insert a batch of values into the `block_packing` table. -/// -/// On a conflict, it will do nothing, leaving the old value. -pub fn insert_batch_block_packing( - conn: &mut PgConn, - packing: Vec, -) -> Result<(), Error> { - use self::block_packing::dsl::*; - - let mut count = 0; - let timer = Instant::now(); - - for chunk in packing.chunks(MAX_SIZE_BATCH_INSERT) { - count += diesel::insert_into(block_packing) - .values(chunk) - .on_conflict_do_nothing() - .execute(conn)?; - } - - let time_taken = timer.elapsed(); - debug!("Block packing inserted, count: {count}, time taken: {time_taken:?}"); - Ok(()) -} - -/// Selects the row from the `block_packing` table where `slot` is minimum. 
-pub fn get_lowest_block_packing(conn: &mut PgConn) -> Result, Error> { - use self::block_packing::dsl::*; - let timer = Instant::now(); - - let result = block_packing - .order_by(slot.asc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Block packing requested: lowest, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects the row from the `block_packing` table where `slot` is maximum. -pub fn get_highest_block_packing(conn: &mut PgConn) -> Result, Error> { - use self::block_packing::dsl::*; - let timer = Instant::now(); - - let result = block_packing - .order_by(slot.desc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Block packing requested: highest, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row of the `block_packing` table corresponding to a given `root_query`. -pub fn get_block_packing_by_root( - conn: &mut PgConn, - root_query: WatchHash, -) -> Result, Error> { - use self::beacon_blocks::dsl::{beacon_blocks, root}; - use self::block_packing::dsl::*; - let timer = Instant::now(); - - let join = beacon_blocks.inner_join(block_packing); - - let result = join - .select((slot, available, included, prior_skip_slots)) - .filter(root.eq(root_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Block packing requested: {root_query}, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row of the `block_packing` table corresponding to a given `slot_query`. -pub fn get_block_packing_by_slot( - conn: &mut PgConn, - slot_query: WatchSlot, -) -> Result, Error> { - use self::block_packing::dsl::*; - let timer = Instant::now(); - - let result = block_packing - .filter(slot.eq(slot_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Block packing requested: {slot_query}, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding -/// row in `block_packing`. -#[allow(dead_code)] -pub fn get_unknown_block_packing( - conn: &mut PgConn, - slots_per_epoch: u64, -) -> Result>, Error> { - use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; - use self::block_packing::dsl::block_packing; - - let join = beacon_blocks.left_join(block_packing); - - let result = join - .select(slot) - .filter(root.is_null()) - // Block packing cannot be retrieved for epoch 0 so we need to exclude them. - .filter(slot.ge(slots_per_epoch as i32)) - .order_by(slot.desc()) - .nullable() - .load::>(conn)?; - - Ok(result) -} diff --git a/watch/src/block_packing/mod.rs b/watch/src/block_packing/mod.rs deleted file mode 100644 index 5d74fc59799..00000000000 --- a/watch/src/block_packing/mod.rs +++ /dev/null @@ -1,38 +0,0 @@ -pub mod database; -pub mod server; -pub mod updater; - -use crate::database::watch_types::WatchSlot; -use crate::updater::error::Error; - -pub use database::{ - get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, - get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, - WatchBlockPacking, -}; -pub use server::block_packing_routes; - -use eth2::BeaconNodeHttpClient; -use types::Epoch; - -/// Sends a request to `lighthouse/analysis/block_packing`. -/// Formats the response into a vector of `WatchBlockPacking`. -/// -/// Will fail if `start_epoch == 0`. 
-pub async fn get_block_packing( - bn: &BeaconNodeHttpClient, - start_epoch: Epoch, - end_epoch: Epoch, -) -> Result, Error> { - Ok(bn - .get_lighthouse_analysis_block_packing(start_epoch, end_epoch) - .await? - .into_iter() - .map(|data| WatchBlockPacking { - slot: WatchSlot::from_slot(data.slot), - available: data.available_attestations as i32, - included: data.included_attestations as i32, - prior_skip_slots: data.prior_skip_slots as i32, - }) - .collect()) -} diff --git a/watch/src/block_packing/server.rs b/watch/src/block_packing/server.rs deleted file mode 100644 index 819144562a5..00000000000 --- a/watch/src/block_packing/server.rs +++ /dev/null @@ -1,31 +0,0 @@ -use crate::block_packing::database::{ - get_block_packing_by_root, get_block_packing_by_slot, WatchBlockPacking, -}; -use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; -use crate::server::Error; - -use axum::{extract::Path, routing::get, Extension, Json, Router}; -use eth2::types::BlockId; -use std::str::FromStr; - -pub async fn get_block_packing( - Path(block_query): Path, - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = get_connection(&pool).map_err(Error::Database)?; - match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { - BlockId::Root(root) => Ok(Json(get_block_packing_by_root( - &mut conn, - WatchHash::from_hash(root), - )?)), - BlockId::Slot(slot) => Ok(Json(get_block_packing_by_slot( - &mut conn, - WatchSlot::from_slot(slot), - )?)), - _ => Err(Error::BadRequest), - } -} - -pub fn block_packing_routes() -> Router { - Router::new().route("/v1/blocks/:block/packing", get(get_block_packing)) -} diff --git a/watch/src/block_packing/updater.rs b/watch/src/block_packing/updater.rs deleted file mode 100644 index 34847f6264d..00000000000 --- a/watch/src/block_packing/updater.rs +++ /dev/null @@ -1,211 +0,0 @@ -use crate::database::{self, Error as DbError}; -use crate::updater::{Error, UpdateHandler}; - -use crate::block_packing::get_block_packing; - -use eth2::types::{Epoch, EthSpec}; -use log::{debug, error, warn}; - -const MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING: u64 = 50; - -impl UpdateHandler { - /// Forward fills the `block_packing` table starting from the entry with the - /// highest slot. - /// - /// It constructs a request to the `get_block_packing` API with: - /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest beacon block) - /// `end_epoch` -> epoch of highest beacon block - /// - /// It will resync the latest epoch if it is not fully filled. - /// That is, `if highest_filled_slot % slots_per_epoch != 31` - /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be - //// resynced during the next head update. - /// - /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. - pub async fn fill_block_packing(&mut self) -> Result<(), Error> { - let mut conn = database::get_connection(&self.pool)?; - - // Get the slot of the highest entry in the `block_packing` table. - let highest_filled_slot_opt = if self.config.block_packing { - database::get_highest_block_packing(&mut conn)?.map(|packing| packing.slot) - } else { - return Err(Error::NotEnabled("block_packing".to_string())); - }; - - let mut start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt { - if highest_filled_slot.as_slot() % self.slots_per_epoch - == self.slots_per_epoch.saturating_sub(1) - { - // The whole epoch is filled so we can begin syncing the next one. 
- highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + 1 - } else { - // The epoch is only partially synced. Try to sync it fully. - highest_filled_slot.as_slot().epoch(self.slots_per_epoch) - } - } else { - // No entries in the `block_packing` table. Use `beacon_blocks` instead. - if let Some(lowest_beacon_block) = database::get_lowest_beacon_block(&mut conn)? { - lowest_beacon_block - .slot - .as_slot() - .epoch(self.slots_per_epoch) - } else { - // There are no blocks in the database, do not fill the `block_packing` table. - warn!("Refusing to fill block packing as there are no blocks in the database"); - return Ok(()); - } - }; - - // The `get_block_packing` API endpoint cannot accept `start_epoch == 0`. - if start_epoch == 0 { - start_epoch += 1 - } - - if let Some(highest_block_slot) = - database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) - { - let mut end_epoch = highest_block_slot.epoch(self.slots_per_epoch); - - if start_epoch > end_epoch { - debug!("Block packing is up to date with the head of the database"); - return Ok(()); - } - - // Ensure the size of the request does not exceed the maximum allowed value. - if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { - end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING - } - - if let Some(lowest_block_slot) = - database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) - { - let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; - - // Since we pull a full epoch of data but are not guaranteed to have all blocks of - // that epoch available, only insert blocks with corresponding `beacon_block`s. - packing.retain(|packing| { - packing.slot.as_slot() >= lowest_block_slot - && packing.slot.as_slot() <= highest_block_slot - }); - database::insert_batch_block_packing(&mut conn, packing)?; - } else { - return Err(Error::Database(DbError::Other( - "Database did not return a lowest block when one exists".to_string(), - ))); - } - } else { - // There are no blocks in the `beacon_blocks` database, but there are entries in the - // `block_packing` table. This is a critical failure. It usually means someone has - // manually tampered with the database tables and should not occur during normal - // operation. - error!("Database is corrupted. Please re-sync the database"); - return Err(Error::Database(DbError::DatabaseCorrupted)); - } - - Ok(()) - } - - /// Backfill the `block_packing` table starting from the entry with the lowest slot. - /// - /// It constructs a request to the `get_block_packing` function with: - /// `start_epoch` -> epoch of lowest_beacon_block - /// `end_epoch` -> epoch of lowest filled `block_packing` - 1 (or epoch of highest beacon block) - /// - /// It will resync the lowest epoch if it is not fully filled. - /// That is, `if lowest_filled_slot % slots_per_epoch != 0` - /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be - //// resynced during the next head update. - /// - /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. - pub async fn backfill_block_packing(&mut self) -> Result<(), Error> { - let mut conn = database::get_connection(&self.pool)?; - let max_block_packing_backfill = self.config.max_backfill_size_epochs; - - // Get the slot of the lowest entry in the `block_packing` table. 
- let lowest_filled_slot_opt = if self.config.block_packing { - database::get_lowest_block_packing(&mut conn)?.map(|packing| packing.slot) - } else { - return Err(Error::NotEnabled("block_packing".to_string())); - }; - - let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { - if lowest_filled_slot.as_slot() % self.slots_per_epoch == 0 { - lowest_filled_slot - .as_slot() - .epoch(self.slots_per_epoch) - .saturating_sub(Epoch::new(1)) - } else { - // The epoch is only partially synced. Try to sync it fully. - lowest_filled_slot.as_slot().epoch(self.slots_per_epoch) - } - } else { - // No entries in the `block_packing` table. Use `beacon_blocks` instead. - if let Some(highest_beacon_block) = - database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) - { - highest_beacon_block.as_slot().epoch(self.slots_per_epoch) - } else { - // There are no blocks in the database, do not backfill the `block_packing` table. - warn!("Refusing to backfill block packing as there are no blocks in the database"); - return Ok(()); - } - }; - - if end_epoch <= 1 { - debug!("Block packing backfill is complete"); - return Ok(()); - } - - if let Some(lowest_block_slot) = - database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) - { - let mut start_epoch = lowest_block_slot.epoch(self.slots_per_epoch); - - if start_epoch >= end_epoch { - debug!("Block packing is up to date with the base of the database"); - return Ok(()); - } - - // Ensure that the request range does not exceed `max_block_packing_backfill` or - // `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. - if start_epoch < end_epoch.saturating_sub(max_block_packing_backfill) { - start_epoch = end_epoch.saturating_sub(max_block_packing_backfill) - } - if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { - start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) - } - - // The `block_packing` API cannot accept `start_epoch == 0`. - if start_epoch == 0 { - start_epoch += 1 - } - - if let Some(highest_block_slot) = - database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) - { - let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; - - // Only insert blocks with corresponding `beacon_block`s. - packing.retain(|packing| { - packing.slot.as_slot() >= lowest_block_slot - && packing.slot.as_slot() <= highest_block_slot - }); - - database::insert_batch_block_packing(&mut conn, packing)?; - } else { - return Err(Error::Database(DbError::Other( - "Database did not return a lowest block when one exists".to_string(), - ))); - } - } else { - // There are no blocks in the `beacon_blocks` database, but there are entries in the - // `block_packing` table. This is a critical failure. It usually means someone has - // manually tampered with the database tables and should not occur during normal - // operation. - error!("Database is corrupted. 
Please re-sync the database"); - return Err(Error::Database(DbError::DatabaseCorrupted)); - } - - Ok(()) - } -} diff --git a/watch/src/block_rewards/database.rs b/watch/src/block_rewards/database.rs deleted file mode 100644 index a2bf49f3e4d..00000000000 --- a/watch/src/block_rewards/database.rs +++ /dev/null @@ -1,137 +0,0 @@ -use crate::database::{ - schema::{beacon_blocks, block_rewards}, - watch_types::{WatchHash, WatchSlot}, - Error, PgConn, MAX_SIZE_BATCH_INSERT, -}; - -use diesel::prelude::*; -use diesel::{Insertable, Queryable}; -use log::debug; -use serde::{Deserialize, Serialize}; -use std::time::Instant; - -#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] -#[diesel(table_name = block_rewards)] -pub struct WatchBlockRewards { - pub slot: WatchSlot, - pub total: i32, - pub attestation_reward: i32, - pub sync_committee_reward: i32, -} - -/// Insert a batch of values into the `block_rewards` table. -/// -/// On a conflict, it will do nothing, leaving the old value. -pub fn insert_batch_block_rewards( - conn: &mut PgConn, - rewards: Vec, -) -> Result<(), Error> { - use self::block_rewards::dsl::*; - - let mut count = 0; - let timer = Instant::now(); - - for chunk in rewards.chunks(MAX_SIZE_BATCH_INSERT) { - count += diesel::insert_into(block_rewards) - .values(chunk) - .on_conflict_do_nothing() - .execute(conn)?; - } - - let time_taken = timer.elapsed(); - debug!("Block rewards inserted, count: {count}, time_taken: {time_taken:?}"); - Ok(()) -} - -/// Selects the row from the `block_rewards` table where `slot` is minimum. -pub fn get_lowest_block_rewards(conn: &mut PgConn) -> Result, Error> { - use self::block_rewards::dsl::*; - let timer = Instant::now(); - - let result = block_rewards - .order_by(slot.asc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Block rewards requested: lowest, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects the row from the `block_rewards` table where `slot` is maximum. -pub fn get_highest_block_rewards(conn: &mut PgConn) -> Result, Error> { - use self::block_rewards::dsl::*; - let timer = Instant::now(); - - let result = block_rewards - .order_by(slot.desc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Block rewards requested: highest, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row of the `block_rewards` table corresponding to a given `root_query`. -pub fn get_block_rewards_by_root( - conn: &mut PgConn, - root_query: WatchHash, -) -> Result, Error> { - use self::beacon_blocks::dsl::{beacon_blocks, root}; - use self::block_rewards::dsl::*; - let timer = Instant::now(); - - let join = beacon_blocks.inner_join(block_rewards); - - let result = join - .select((slot, total, attestation_reward, sync_committee_reward)) - .filter(root.eq(root_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Block rewards requested: {root_query}, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row of the `block_rewards` table corresponding to a given `slot_query`. 
-pub fn get_block_rewards_by_slot( - conn: &mut PgConn, - slot_query: WatchSlot, -) -> Result, Error> { - use self::block_rewards::dsl::*; - let timer = Instant::now(); - - let result = block_rewards - .filter(slot.eq(slot_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Block rewards requested: {slot_query}, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding -/// row in `block_rewards`. -#[allow(dead_code)] -pub fn get_unknown_block_rewards(conn: &mut PgConn) -> Result>, Error> { - use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; - use self::block_rewards::dsl::block_rewards; - - let join = beacon_blocks.left_join(block_rewards); - - let result = join - .select(slot) - .filter(root.is_null()) - // Block rewards cannot be retrieved for `slot == 0` so we need to exclude it. - .filter(slot.ne(0)) - .order_by(slot.desc()) - .nullable() - .load::>(conn)?; - - Ok(result) -} diff --git a/watch/src/block_rewards/mod.rs b/watch/src/block_rewards/mod.rs deleted file mode 100644 index 0dac88ea58d..00000000000 --- a/watch/src/block_rewards/mod.rs +++ /dev/null @@ -1,38 +0,0 @@ -pub mod database; -mod server; -mod updater; - -use crate::database::watch_types::WatchSlot; -use crate::updater::error::Error; - -pub use database::{ - get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, - get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, - WatchBlockRewards, -}; -pub use server::block_rewards_routes; - -use eth2::BeaconNodeHttpClient; -use types::Slot; - -/// Sends a request to `lighthouse/analysis/block_rewards`. -/// Formats the response into a vector of `WatchBlockRewards`. -/// -/// Will fail if `start_slot == 0`. -pub async fn get_block_rewards( - bn: &BeaconNodeHttpClient, - start_slot: Slot, - end_slot: Slot, -) -> Result, Error> { - Ok(bn - .get_lighthouse_analysis_block_rewards(start_slot, end_slot) - .await? - .into_iter() - .map(|data| WatchBlockRewards { - slot: WatchSlot::from_slot(data.meta.slot), - total: data.total as i32, - attestation_reward: data.attestation_rewards.total as i32, - sync_committee_reward: data.sync_committee_rewards as i32, - }) - .collect()) -} diff --git a/watch/src/block_rewards/server.rs b/watch/src/block_rewards/server.rs deleted file mode 100644 index 480346e25b3..00000000000 --- a/watch/src/block_rewards/server.rs +++ /dev/null @@ -1,31 +0,0 @@ -use crate::block_rewards::database::{ - get_block_rewards_by_root, get_block_rewards_by_slot, WatchBlockRewards, -}; -use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; -use crate::server::Error; - -use axum::{extract::Path, routing::get, Extension, Json, Router}; -use eth2::types::BlockId; -use std::str::FromStr; - -pub async fn get_block_rewards( - Path(block_query): Path, - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = get_connection(&pool).map_err(Error::Database)?; - match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ - BlockId::Root(root) => Ok(Json(get_block_rewards_by_root( - &mut conn, - WatchHash::from_hash(root), - )?)), - BlockId::Slot(slot) => Ok(Json(get_block_rewards_by_slot( - &mut conn, - WatchSlot::from_slot(slot), - )?)), - _ => Err(Error::BadRequest), - } -} - -pub fn block_rewards_routes() -> Router { - Router::new().route("/v1/blocks/:block/rewards", get(get_block_rewards)) -} diff --git a/watch/src/block_rewards/updater.rs b/watch/src/block_rewards/updater.rs deleted file mode 100644 index e2893ad0fea..00000000000 --- a/watch/src/block_rewards/updater.rs +++ /dev/null @@ -1,157 +0,0 @@ -use crate::database::{self, Error as DbError}; -use crate::updater::{Error, UpdateHandler}; - -use crate::block_rewards::get_block_rewards; - -use eth2::types::EthSpec; -use log::{debug, error, warn}; - -const MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS: u64 = 1600; - -impl UpdateHandler { - /// Forward fills the `block_rewards` table starting from the entry with the - /// highest slot. - /// - /// It constructs a request to the `get_block_rewards` API with: - /// `start_slot` -> highest filled `block_rewards` + 1 (or lowest beacon block) - /// `end_slot` -> highest beacon block - /// - /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. - pub async fn fill_block_rewards(&mut self) -> Result<(), Error> { - let mut conn = database::get_connection(&self.pool)?; - - // Get the slot of the highest entry in the `block_rewards` table. - let highest_filled_slot_opt = if self.config.block_rewards { - database::get_highest_block_rewards(&mut conn)?.map(|reward| reward.slot) - } else { - return Err(Error::NotEnabled("block_rewards".to_string())); - }; - - let mut start_slot = if let Some(highest_filled_slot) = highest_filled_slot_opt { - highest_filled_slot.as_slot() + 1 - } else { - // No entries in the `block_rewards` table. Use `beacon_blocks` instead. - if let Some(lowest_beacon_block) = - database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) - { - lowest_beacon_block.as_slot() - } else { - // There are no blocks in the database, do not fill the `block_rewards` table. - warn!("Refusing to fill block rewards as there are no blocks in the database"); - return Ok(()); - } - }; - - // The `block_rewards` API cannot accept `start_slot == 0`. - if start_slot == 0 { - start_slot += 1; - } - - if let Some(highest_beacon_block) = - database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) - { - let mut end_slot = highest_beacon_block.as_slot(); - - if start_slot > end_slot { - debug!("Block rewards are up to date with the head of the database"); - return Ok(()); - } - - // Ensure the size of the request does not exceed the maximum allowed value. - if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { - end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS - } - - let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; - database::insert_batch_block_rewards(&mut conn, rewards)?; - } else { - // There are no blocks in the `beacon_blocks` database, but there are entries in the - // `block_rewards` table. This is a critical failure. It usually means someone has - // manually tampered with the database tables and should not occur during normal - // operation. - error!("Database is corrupted. Please re-sync the database"); - return Err(Error::Database(DbError::DatabaseCorrupted)); - } - - Ok(()) - } - - /// Backfill the `block_rewards` tables starting from the entry with the - /// lowest slot. 
- /// - /// It constructs a request to the `get_block_rewards` API with: - /// `start_slot` -> lowest_beacon_block - /// `end_slot` -> lowest filled `block_rewards` - 1 (or highest beacon block) - /// - /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. - pub async fn backfill_block_rewards(&mut self) -> Result<(), Error> { - let mut conn = database::get_connection(&self.pool)?; - let max_block_reward_backfill = self.config.max_backfill_size_epochs * self.slots_per_epoch; - - // Get the slot of the lowest entry in the `block_rewards` table. - let lowest_filled_slot_opt = if self.config.block_rewards { - database::get_lowest_block_rewards(&mut conn)?.map(|reward| reward.slot) - } else { - return Err(Error::NotEnabled("block_rewards".to_string())); - }; - - let end_slot = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { - lowest_filled_slot.as_slot().saturating_sub(1_u64) - } else { - // No entries in the `block_rewards` table. Use `beacon_blocks` instead. - if let Some(highest_beacon_block) = - database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) - { - highest_beacon_block.as_slot() - } else { - // There are no blocks in the database, do not backfill the `block_rewards` table. - warn!("Refusing to backfill block rewards as there are no blocks in the database"); - return Ok(()); - } - }; - - if end_slot <= 1 { - debug!("Block rewards backfill is complete"); - return Ok(()); - } - - if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? { - let mut start_slot = lowest_block_slot.slot.as_slot(); - - if start_slot >= end_slot { - debug!("Block rewards are up to date with the base of the database"); - return Ok(()); - } - - // Ensure that the request range does not exceed `max_block_reward_backfill` or - // `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. - if start_slot < end_slot.saturating_sub(max_block_reward_backfill) { - start_slot = end_slot.saturating_sub(max_block_reward_backfill) - } - - if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { - start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) - } - - // The `block_rewards` API cannot accept `start_slot == 0`. - if start_slot == 0 { - start_slot += 1 - } - - let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; - - if self.config.block_rewards { - database::insert_batch_block_rewards(&mut conn, rewards)?; - } - } else { - // There are no blocks in the `beacon_blocks` database, but there are entries in the - // `block_rewards` table. This is a critical failure. It usually means someone has - // manually tampered with the database tables and should not occur during normal - // operation. - error!("Database is corrupted. 
Please re-sync the database"); - return Err(Error::Database(DbError::DatabaseCorrupted)); - } - - Ok(()) - } -} diff --git a/watch/src/blockprint/config.rs b/watch/src/blockprint/config.rs deleted file mode 100644 index 721fa7cb197..00000000000 --- a/watch/src/blockprint/config.rs +++ /dev/null @@ -1,40 +0,0 @@ -use serde::{Deserialize, Serialize}; - -pub const fn enabled() -> bool { - false -} - -pub const fn url() -> Option { - None -} - -pub const fn username() -> Option { - None -} - -pub const fn password() -> Option { - None -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - #[serde(default = "enabled")] - pub enabled: bool, - #[serde(default = "url")] - pub url: Option, - #[serde(default = "username")] - pub username: Option, - #[serde(default = "password")] - pub password: Option, -} - -impl Default for Config { - fn default() -> Self { - Config { - enabled: enabled(), - url: url(), - username: username(), - password: password(), - } - } -} diff --git a/watch/src/blockprint/database.rs b/watch/src/blockprint/database.rs deleted file mode 100644 index f0bc3f8ac86..00000000000 --- a/watch/src/blockprint/database.rs +++ /dev/null @@ -1,225 +0,0 @@ -use crate::database::{ - self, - schema::{beacon_blocks, blockprint}, - watch_types::{WatchHash, WatchSlot}, - Error, PgConn, MAX_SIZE_BATCH_INSERT, -}; - -use diesel::prelude::*; -use diesel::sql_types::{Integer, Text}; -use diesel::{Insertable, Queryable}; -use log::debug; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::time::Instant; - -type WatchConsensusClient = String; -pub fn list_consensus_clients() -> Vec { - vec![ - "Lighthouse".to_string(), - "Lodestar".to_string(), - "Nimbus".to_string(), - "Prysm".to_string(), - "Teku".to_string(), - "Unknown".to_string(), - ] -} - -#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] -#[diesel(table_name = blockprint)] -pub struct WatchBlockprint { - pub slot: WatchSlot, - pub best_guess: WatchConsensusClient, -} - -#[derive(Debug, QueryableByName, diesel::FromSqlRow)] -#[allow(dead_code)] -pub struct WatchValidatorBlockprint { - #[diesel(sql_type = Integer)] - pub proposer_index: i32, - #[diesel(sql_type = Text)] - pub best_guess: WatchConsensusClient, - #[diesel(sql_type = Integer)] - pub slot: WatchSlot, -} - -/// Insert a batch of values into the `blockprint` table. -/// -/// On a conflict, it will do nothing, leaving the old value. -pub fn insert_batch_blockprint( - conn: &mut PgConn, - prints: Vec, -) -> Result<(), Error> { - use self::blockprint::dsl::*; - - let mut count = 0; - let timer = Instant::now(); - - for chunk in prints.chunks(MAX_SIZE_BATCH_INSERT) { - count += diesel::insert_into(blockprint) - .values(chunk) - .on_conflict_do_nothing() - .execute(conn)?; - } - - let time_taken = timer.elapsed(); - debug!("Blockprint inserted, count: {count}, time_taken: {time_taken:?}"); - Ok(()) -} - -/// Selects the row from the `blockprint` table where `slot` is minimum. -pub fn get_lowest_blockprint(conn: &mut PgConn) -> Result, Error> { - use self::blockprint::dsl::*; - let timer = Instant::now(); - - let result = blockprint - .order_by(slot.asc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Blockprint requested: lowest, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects the row from the `blockprint` table where `slot` is maximum. 
-pub fn get_highest_blockprint(conn: &mut PgConn) -> Result, Error> { - use self::blockprint::dsl::*; - let timer = Instant::now(); - - let result = blockprint - .order_by(slot.desc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Blockprint requested: highest, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row of the `blockprint` table corresponding to a given `root_query`. -pub fn get_blockprint_by_root( - conn: &mut PgConn, - root_query: WatchHash, -) -> Result, Error> { - use self::beacon_blocks::dsl::{beacon_blocks, root}; - use self::blockprint::dsl::*; - let timer = Instant::now(); - - let join = beacon_blocks.inner_join(blockprint); - - let result = join - .select((slot, best_guess)) - .filter(root.eq(root_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Blockprint requested: {root_query}, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row of the `blockprint` table corresponding to a given `slot_query`. -pub fn get_blockprint_by_slot( - conn: &mut PgConn, - slot_query: WatchSlot, -) -> Result, Error> { - use self::blockprint::dsl::*; - let timer = Instant::now(); - - let result = blockprint - .filter(slot.eq(slot_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Blockprint requested: {slot_query}, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding -/// row in `blockprint`. -#[allow(dead_code)] -pub fn get_unknown_blockprint(conn: &mut PgConn) -> Result>, Error> { - use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; - use self::blockprint::dsl::blockprint; - - let join = beacon_blocks.left_join(blockprint); - - let result = join - .select(slot) - .filter(root.is_null()) - .order_by(slot.desc()) - .nullable() - .load::>(conn)?; - - Ok(result) -} - -/// Constructs a HashMap of `index` -> `best_guess` for each validator's latest proposal at or before -/// `target_slot`. -/// Inserts `"Unknown" if no prior proposals exist. -pub fn construct_validator_blockprints_at_slot( - conn: &mut PgConn, - target_slot: WatchSlot, - slots_per_epoch: u64, -) -> Result, Error> { - use self::blockprint::dsl::{blockprint, slot}; - - let total_validators = - database::count_validators_activated_before_slot(conn, target_slot, slots_per_epoch)? - as usize; - - let mut blockprint_map = HashMap::with_capacity(total_validators); - - let latest_proposals = - database::get_all_validators_latest_proposer_info_at_slot(conn, target_slot)?; - - let latest_proposal_slots: Vec = latest_proposals.clone().into_keys().collect(); - - let result = blockprint - .filter(slot.eq_any(latest_proposal_slots)) - .load::(conn)?; - - // Insert the validators which have available blockprints. - for print in result { - if let Some(proposer) = latest_proposals.get(&print.slot) { - blockprint_map.insert(*proposer, print.best_guess); - } - } - - // Insert the rest of the unknown validators. - for validator_index in 0..total_validators { - blockprint_map - .entry(validator_index as i32) - .or_insert_with(|| "Unknown".to_string()); - } - - Ok(blockprint_map) -} - -/// Counts the number of occurances of each `client` present in the `validators` table at or before some -/// `target_slot`. 
-pub fn get_validators_clients_at_slot( - conn: &mut PgConn, - target_slot: WatchSlot, - slots_per_epoch: u64, -) -> Result, Error> { - let mut client_map: HashMap = HashMap::new(); - - // This includes all validators which were activated at or before `target_slot`. - let validator_blockprints = - construct_validator_blockprints_at_slot(conn, target_slot, slots_per_epoch)?; - - for client in list_consensus_clients() { - let count = validator_blockprints - .iter() - .filter(|(_, v)| (*v).clone() == client) - .count(); - client_map.insert(client, count); - } - - Ok(client_map) -} diff --git a/watch/src/blockprint/mod.rs b/watch/src/blockprint/mod.rs deleted file mode 100644 index 319090c6565..00000000000 --- a/watch/src/blockprint/mod.rs +++ /dev/null @@ -1,150 +0,0 @@ -pub mod database; -pub mod server; -pub mod updater; - -mod config; - -use crate::database::WatchSlot; - -use eth2::SensitiveUrl; -use reqwest::{Client, Response, Url}; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::time::Duration; -use types::Slot; - -pub use config::Config; -pub use database::{ - get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, - get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, - WatchBlockprint, -}; -pub use server::blockprint_routes; - -const TIMEOUT: Duration = Duration::from_secs(50); - -#[derive(Debug)] -#[allow(dead_code)] -pub enum Error { - Reqwest(reqwest::Error), - Url(url::ParseError), - BlockprintNotSynced, - Other(String), -} - -impl From for Error { - fn from(e: reqwest::Error) -> Self { - Error::Reqwest(e) - } -} - -impl From for Error { - fn from(e: url::ParseError) -> Self { - Error::Url(e) - } -} - -pub struct WatchBlockprintClient { - pub client: Client, - pub server: SensitiveUrl, - pub username: Option, - pub password: Option, -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct BlockprintSyncingResponse { - pub greatest_block_slot: Slot, - pub synced: bool, -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct BlockprintResponse { - pub proposer_index: i32, - pub slot: Slot, - pub best_guess_single: String, -} - -impl WatchBlockprintClient { - async fn get(&self, url: Url) -> Result { - let mut builder = self.client.get(url).timeout(TIMEOUT); - if let Some(username) = &self.username { - builder = builder.basic_auth(username, self.password.as_ref()); - } - let response = builder.send().await.map_err(Error::Reqwest)?; - - if !response.status().is_success() { - return Err(Error::Other(response.text().await?)); - } - - Ok(response) - } - - // Returns the `greatest_block_slot` as reported by the Blockprint server. - // Will error if the Blockprint server is not synced. - #[allow(dead_code)] - pub async fn ensure_synced(&self) -> Result { - let url = self.server.full.join("sync/")?.join("status")?; - - let response = self.get(url).await?; - - let result = response.json::().await?; - if !result.synced { - return Err(Error::BlockprintNotSynced); - } - - Ok(result.greatest_block_slot) - } - - // Pulls the latest blockprint for all validators. - #[allow(dead_code)] - pub async fn blockprint_all_validators( - &self, - highest_validator: i32, - ) -> Result, Error> { - let url = self - .server - .full - .join("validator/")? - .join("blocks/")? 
- .join("latest")?; - - let response = self.get(url).await?; - - let mut result = response.json::>().await?; - result.retain(|print| print.proposer_index <= highest_validator); - - let mut map: HashMap = HashMap::with_capacity(result.len()); - for print in result { - map.insert(print.proposer_index, print.best_guess_single); - } - - Ok(map) - } - - // Construct a request to the Blockprint server for a range of slots between `start_slot` and - // `end_slot`. - pub async fn get_blockprint( - &self, - start_slot: Slot, - end_slot: Slot, - ) -> Result, Error> { - let url = self - .server - .full - .join("blocks/")? - .join(&format!("{start_slot}/{end_slot}"))?; - - let response = self.get(url).await?; - - let result = response - .json::>() - .await? - .iter() - .map(|response| WatchBlockprint { - slot: WatchSlot::from_slot(response.slot), - best_guess: response.best_guess_single.clone(), - }) - .collect(); - Ok(result) - } -} diff --git a/watch/src/blockprint/server.rs b/watch/src/blockprint/server.rs deleted file mode 100644 index 488af157174..00000000000 --- a/watch/src/blockprint/server.rs +++ /dev/null @@ -1,31 +0,0 @@ -use crate::blockprint::database::{ - get_blockprint_by_root, get_blockprint_by_slot, WatchBlockprint, -}; -use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; -use crate::server::Error; - -use axum::{extract::Path, routing::get, Extension, Json, Router}; -use eth2::types::BlockId; -use std::str::FromStr; - -pub async fn get_blockprint( - Path(block_query): Path, - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = get_connection(&pool).map_err(Error::Database)?; - match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { - BlockId::Root(root) => Ok(Json(get_blockprint_by_root( - &mut conn, - WatchHash::from_hash(root), - )?)), - BlockId::Slot(slot) => Ok(Json(get_blockprint_by_slot( - &mut conn, - WatchSlot::from_slot(slot), - )?)), - _ => Err(Error::BadRequest), - } -} - -pub fn blockprint_routes() -> Router { - Router::new().route("/v1/blocks/:block/blockprint", get(get_blockprint)) -} diff --git a/watch/src/blockprint/updater.rs b/watch/src/blockprint/updater.rs deleted file mode 100644 index 7ec56dd9c81..00000000000 --- a/watch/src/blockprint/updater.rs +++ /dev/null @@ -1,172 +0,0 @@ -use crate::database::{self, Error as DbError}; -use crate::updater::{Error, UpdateHandler}; - -use eth2::types::EthSpec; -use log::{debug, error, warn}; - -const MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT: u64 = 1600; - -impl UpdateHandler { - /// Forward fills the `blockprint` table starting from the entry with the - /// highest slot. - /// - /// It constructs a request to the `get_blockprint` API with: - /// `start_slot` -> highest filled `blockprint` + 1 (or lowest beacon block) - /// `end_slot` -> highest beacon block - /// - /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. - pub async fn fill_blockprint(&mut self) -> Result<(), Error> { - // Ensure blockprint in enabled. - if let Some(blockprint_client) = &self.blockprint { - let mut conn = database::get_connection(&self.pool)?; - - // Get the slot of the highest entry in the `blockprint` table. - let mut start_slot = if let Some(highest_filled_slot) = - database::get_highest_blockprint(&mut conn)?.map(|print| print.slot) - { - highest_filled_slot.as_slot() + 1 - } else { - // No entries in the `blockprint` table. Use `beacon_blocks` instead. 
- if let Some(lowest_beacon_block) = - database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) - { - lowest_beacon_block.as_slot() - } else { - // There are no blocks in the database, do not fill the `blockprint` table. - warn!("Refusing to fill blockprint as there are no blocks in the database"); - return Ok(()); - } - }; - - // The `blockprint` API cannot accept `start_slot == 0`. - if start_slot == 0 { - start_slot += 1; - } - - if let Some(highest_beacon_block) = - database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) - { - let mut end_slot = highest_beacon_block.as_slot(); - - if start_slot > end_slot { - debug!("Blockprint is up to date with the head of the database"); - return Ok(()); - } - - // Ensure the size of the request does not exceed the maximum allowed value. - if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { - end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT - } - - let mut prints = blockprint_client - .get_blockprint(start_slot, end_slot) - .await?; - - // Ensure the prints returned from blockprint are for slots which exist in the - // `beacon_blocks` table. - prints.retain(|print| { - database::get_beacon_block_by_slot(&mut conn, print.slot) - .ok() - .flatten() - .is_some() - }); - - database::insert_batch_blockprint(&mut conn, prints)?; - } else { - // There are no blocks in the `beacon_blocks` database, but there are entries in either - // `blockprint` table. This is a critical failure. It usually means - // someone has manually tampered with the database tables and should not occur during - // normal operation. - error!("Database is corrupted. Please re-sync the database"); - return Err(Error::Database(DbError::DatabaseCorrupted)); - } - } - - Ok(()) - } - - /// Backfill the `blockprint` table starting from the entry with the lowest slot. - /// - /// It constructs a request to the `get_blockprint` API with: - /// `start_slot` -> lowest_beacon_block - /// `end_slot` -> lowest filled `blockprint` - 1 (or highest beacon block) - /// - /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. - pub async fn backfill_blockprint(&mut self) -> Result<(), Error> { - // Ensure blockprint in enabled. - if let Some(blockprint_client) = &self.blockprint { - let mut conn = database::get_connection(&self.pool)?; - let max_blockprint_backfill = - self.config.max_backfill_size_epochs * self.slots_per_epoch; - - // Get the slot of the lowest entry in the `blockprint` table. - let end_slot = if let Some(lowest_filled_slot) = - database::get_lowest_blockprint(&mut conn)?.map(|print| print.slot) - { - lowest_filled_slot.as_slot().saturating_sub(1_u64) - } else { - // No entries in the `blockprint` table. Use `beacon_blocks` instead. - if let Some(highest_beacon_block) = - database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) - { - highest_beacon_block.as_slot() - } else { - // There are no blocks in the database, do not backfill the `blockprint` table. - warn!("Refusing to backfill blockprint as there are no blocks in the database"); - return Ok(()); - } - }; - - if end_slot <= 1 { - debug!("Blockprint backfill is complete"); - return Ok(()); - } - - if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? 
{ - let mut start_slot = lowest_block_slot.slot.as_slot(); - - if start_slot >= end_slot { - debug!("Blockprint are up to date with the base of the database"); - return Ok(()); - } - - // Ensure that the request range does not exceed `max_blockprint_backfill` or - // `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. - if start_slot < end_slot.saturating_sub(max_blockprint_backfill) { - start_slot = end_slot.saturating_sub(max_blockprint_backfill) - } - - if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { - start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) - } - - // The `blockprint` API cannot accept `start_slot == 0`. - if start_slot == 0 { - start_slot += 1 - } - - let mut prints = blockprint_client - .get_blockprint(start_slot, end_slot) - .await?; - - // Ensure the prints returned from blockprint are for slots which exist in the - // `beacon_blocks` table. - prints.retain(|print| { - database::get_beacon_block_by_slot(&mut conn, print.slot) - .ok() - .flatten() - .is_some() - }); - - database::insert_batch_blockprint(&mut conn, prints)?; - } else { - // There are no blocks in the `beacon_blocks` database, but there are entries in the `blockprint` - // table. This is a critical failure. It usually means someone has manually tampered with the - // database tables and should not occur during normal operation. - error!("Database is corrupted. Please re-sync the database"); - return Err(Error::Database(DbError::DatabaseCorrupted)); - } - } - Ok(()) - } -} diff --git a/watch/src/cli.rs b/watch/src/cli.rs deleted file mode 100644 index b7179efe5d4..00000000000 --- a/watch/src/cli.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::{config::Config, logger, server, updater}; -use clap::{Arg, ArgAction, Command}; -use clap_utils::get_color_style; - -pub const SERVE: &str = "serve"; -pub const RUN_UPDATER: &str = "run-updater"; -pub const CONFIG: &str = "config"; - -fn run_updater() -> Command { - Command::new(RUN_UPDATER).styles(get_color_style()) -} - -fn serve() -> Command { - Command::new(SERVE).styles(get_color_style()) -} - -pub fn app() -> Command { - Command::new("beacon_watch_daemon") - .author("Sigma Prime ") - .styles(get_color_style()) - .arg( - Arg::new(CONFIG) - .long(CONFIG) - .value_name("PATH_TO_CONFIG") - .help("Path to configuration file") - .action(ArgAction::Set) - .global(true), - ) - .subcommand(run_updater()) - .subcommand(serve()) -} - -pub async fn run() -> Result<(), String> { - let matches = app().get_matches(); - - let config = match matches.get_one::(CONFIG) { - Some(path) => Config::load_from_file(path.to_string())?, - None => Config::default(), - }; - - logger::init_logger(&config.log_level); - - match matches.subcommand() { - Some((RUN_UPDATER, _)) => updater::run_updater(config) - .await - .map_err(|e| format!("Failure: {:?}", e)), - Some((SERVE, _)) => server::serve(config) - .await - .map_err(|e| format!("Failure: {:?}", e)), - _ => Err("Unsupported subcommand. 
See --help".into()), - } -} diff --git a/watch/src/client.rs b/watch/src/client.rs deleted file mode 100644 index 43aaccde343..00000000000 --- a/watch/src/client.rs +++ /dev/null @@ -1,178 +0,0 @@ -use crate::block_packing::WatchBlockPacking; -use crate::block_rewards::WatchBlockRewards; -use crate::database::models::{ - WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator, -}; -use crate::suboptimal_attestations::WatchAttestation; - -use eth2::types::BlockId; -use reqwest::Client; -use serde::de::DeserializeOwned; -use types::Hash256; -use url::Url; - -#[derive(Debug)] -pub enum Error { - Reqwest(reqwest::Error), - Url(url::ParseError), -} - -impl From for Error { - fn from(e: reqwest::Error) -> Self { - Error::Reqwest(e) - } -} - -impl From for Error { - fn from(e: url::ParseError) -> Self { - Error::Url(e) - } -} - -pub struct WatchHttpClient { - pub client: Client, - pub server: Url, -} - -impl WatchHttpClient { - async fn get_opt(&self, url: Url) -> Result, Error> { - let response = self.client.get(url).send().await?; - - if response.status() == 404 { - Ok(None) - } else { - response - .error_for_status()? - .json() - .await - .map_err(Into::into) - } - } - - pub async fn get_beacon_blocks( - &self, - block_id: BlockId, - ) -> Result, Error> { - let url = self - .server - .join("v1/")? - .join("blocks/")? - .join(&block_id.to_string())?; - - self.get_opt(url).await - } - - pub async fn get_lowest_canonical_slot(&self) -> Result, Error> { - let url = self.server.join("v1/")?.join("slots/")?.join("lowest")?; - - self.get_opt(url).await - } - - pub async fn get_highest_canonical_slot(&self) -> Result, Error> { - let url = self.server.join("v1/")?.join("slots/")?.join("highest")?; - - self.get_opt(url).await - } - - pub async fn get_lowest_beacon_block(&self) -> Result, Error> { - let url = self.server.join("v1/")?.join("blocks/")?.join("lowest")?; - - self.get_opt(url).await - } - - pub async fn get_highest_beacon_block(&self) -> Result, Error> { - let url = self.server.join("v1/")?.join("blocks/")?.join("highest")?; - - self.get_opt(url).await - } - - pub async fn get_next_beacon_block( - &self, - parent: Hash256, - ) -> Result, Error> { - let url = self - .server - .join("v1/")? - .join("blocks/")? - .join(&format!("{parent:?}/"))? - .join("next")?; - - self.get_opt(url).await - } - - pub async fn get_validator_by_index( - &self, - index: i32, - ) -> Result, Error> { - let url = self - .server - .join("v1/")? - .join("validators/")? - .join(&format!("{index}"))?; - - self.get_opt(url).await - } - - pub async fn get_proposer_info( - &self, - block_id: BlockId, - ) -> Result, Error> { - let url = self - .server - .join("v1/")? - .join("blocks/")? - .join(&format!("{block_id}/"))? - .join("proposer")?; - - self.get_opt(url).await - } - - pub async fn get_block_reward( - &self, - block_id: BlockId, - ) -> Result, Error> { - let url = self - .server - .join("v1/")? - .join("blocks/")? - .join(&format!("{block_id}/"))? - .join("rewards")?; - - self.get_opt(url).await - } - - pub async fn get_block_packing( - &self, - block_id: BlockId, - ) -> Result, Error> { - let url = self - .server - .join("v1/")? - .join("blocks/")? - .join(&format!("{block_id}/"))? 
- .join("packing")?; - - self.get_opt(url).await - } - - pub async fn get_all_validators(&self) -> Result>, Error> { - let url = self.server.join("v1/")?.join("validators/")?.join("all")?; - - self.get_opt(url).await - } - - pub async fn get_attestations( - &self, - epoch: i32, - ) -> Result>, Error> { - let url = self - .server - .join("v1/")? - .join("validators/")? - .join("all/")? - .join("attestation/")? - .join(&format!("{epoch}"))?; - - self.get_opt(url).await - } -} diff --git a/watch/src/config.rs b/watch/src/config.rs deleted file mode 100644 index 4e61f9df9ca..00000000000 --- a/watch/src/config.rs +++ /dev/null @@ -1,50 +0,0 @@ -use crate::blockprint::Config as BlockprintConfig; -use crate::database::Config as DatabaseConfig; -use crate::server::Config as ServerConfig; -use crate::updater::Config as UpdaterConfig; - -use serde::{Deserialize, Serialize}; -use std::fs::File; - -pub const LOG_LEVEL: &str = "debug"; - -fn log_level() -> String { - LOG_LEVEL.to_string() -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - #[serde(default)] - pub blockprint: BlockprintConfig, - #[serde(default)] - pub database: DatabaseConfig, - #[serde(default)] - pub server: ServerConfig, - #[serde(default)] - pub updater: UpdaterConfig, - /// The minimum severity for log messages. - #[serde(default = "log_level")] - pub log_level: String, -} - -impl Default for Config { - fn default() -> Self { - Self { - blockprint: BlockprintConfig::default(), - database: DatabaseConfig::default(), - server: ServerConfig::default(), - updater: UpdaterConfig::default(), - log_level: log_level(), - } - } -} - -impl Config { - pub fn load_from_file(path_to_file: String) -> Result { - let file = - File::open(path_to_file).map_err(|e| format!("Error reading config file: {:?}", e))?; - let config: Config = serde_yaml::from_reader(file) - .map_err(|e| format!("Error parsing config file: {:?}", e))?; - Ok(config) - } -} diff --git a/watch/src/database/compat.rs b/watch/src/database/compat.rs deleted file mode 100644 index e3e9e0df6fe..00000000000 --- a/watch/src/database/compat.rs +++ /dev/null @@ -1,47 +0,0 @@ -//! Implementations of PostgreSQL compatibility traits. -use crate::database::watch_types::{WatchHash, WatchPK, WatchSlot}; -use diesel::deserialize::{self, FromSql}; -use diesel::pg::{Pg, PgValue}; -use diesel::serialize::{self, Output, ToSql}; -use diesel::sql_types::{Binary, Integer}; - -macro_rules! impl_to_from_sql_int { - ($type:ty) => { - impl ToSql for $type - where - i32: ToSql, - { - fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result { - let v = i32::try_from(self.as_u64()).map_err(|e| Box::new(e))?; - >::to_sql(&v, &mut out.reborrow()) - } - } - - impl FromSql for $type { - fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { - Ok(Self::new(i32::from_sql(bytes)? as u64)) - } - } - }; -} - -macro_rules! 
impl_to_from_sql_binary { - ($type:ty) => { - impl ToSql for $type { - fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result { - let b = self.as_bytes(); - <&[u8] as ToSql>::to_sql(&b, &mut out.reborrow()) - } - } - - impl FromSql for $type { - fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { - Self::from_bytes(bytes.as_bytes()).map_err(|e| e.to_string().into()) - } - } - }; -} - -impl_to_from_sql_int!(WatchSlot); -impl_to_from_sql_binary!(WatchHash); -impl_to_from_sql_binary!(WatchPK); diff --git a/watch/src/database/config.rs b/watch/src/database/config.rs deleted file mode 100644 index dc0c70832f4..00000000000 --- a/watch/src/database/config.rs +++ /dev/null @@ -1,74 +0,0 @@ -use serde::{Deserialize, Serialize}; - -pub const USER: &str = "postgres"; -pub const PASSWORD: &str = "postgres"; -pub const DBNAME: &str = "dev"; -pub const DEFAULT_DBNAME: &str = "postgres"; -pub const HOST: &str = "localhost"; -pub const fn port() -> u16 { - 5432 -} -pub const fn connect_timeout_millis() -> u64 { - 2_000 // 2s -} - -fn user() -> String { - USER.to_string() -} - -fn password() -> String { - PASSWORD.to_string() -} - -fn dbname() -> String { - DBNAME.to_string() -} - -fn default_dbname() -> String { - DEFAULT_DBNAME.to_string() -} - -fn host() -> String { - HOST.to_string() -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - #[serde(default = "user")] - pub user: String, - #[serde(default = "password")] - pub password: String, - #[serde(default = "dbname")] - pub dbname: String, - #[serde(default = "default_dbname")] - pub default_dbname: String, - #[serde(default = "host")] - pub host: String, - #[serde(default = "port")] - pub port: u16, - #[serde(default = "connect_timeout_millis")] - pub connect_timeout_millis: u64, -} - -impl Default for Config { - fn default() -> Self { - Self { - user: user(), - password: password(), - dbname: dbname(), - default_dbname: default_dbname(), - host: host(), - port: port(), - connect_timeout_millis: connect_timeout_millis(), - } - } -} - -impl Config { - pub fn build_database_url(&self) -> String { - format!( - "postgres://{}:{}@{}:{}/{}", - self.user, self.password, self.host, self.port, self.dbname - ) - } -} diff --git a/watch/src/database/error.rs b/watch/src/database/error.rs deleted file mode 100644 index 8c5088fa133..00000000000 --- a/watch/src/database/error.rs +++ /dev/null @@ -1,55 +0,0 @@ -use bls::Error as BlsError; -use diesel::result::{ConnectionError, Error as PgError}; -use eth2::SensitiveError; -use r2d2::Error as PoolError; -use std::fmt; -use types::BeaconStateError; - -#[derive(Debug)] -pub enum Error { - BeaconState(BeaconStateError), - Database(PgError), - DatabaseCorrupted, - InvalidSig(BlsError), - PostgresConnection(ConnectionError), - Pool(PoolError), - SensitiveUrl(SensitiveError), - InvalidRoot, - Other(String), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl From for Error { - fn from(e: BeaconStateError) -> Self { - Error::BeaconState(e) - } -} - -impl From for Error { - fn from(e: ConnectionError) -> Self { - Error::PostgresConnection(e) - } -} - -impl From for Error { - fn from(e: PgError) -> Self { - Error::Database(e) - } -} - -impl From for Error { - fn from(e: PoolError) -> Self { - Error::Pool(e) - } -} - -impl From for Error { - fn from(e: BlsError) -> Self { - Error::InvalidSig(e) - } -} diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs deleted file mode 100644 
index 7193b0744aa..00000000000 --- a/watch/src/database/mod.rs +++ /dev/null @@ -1,786 +0,0 @@ -mod config; -mod error; - -pub mod compat; -pub mod models; -pub mod schema; -pub mod utils; -pub mod watch_types; - -use self::schema::{ - active_config, beacon_blocks, canonical_slots, proposer_info, suboptimal_attestations, - validators, -}; - -use diesel::dsl::max; -use diesel::prelude::*; -use diesel::r2d2::{Builder, ConnectionManager, Pool, PooledConnection}; -use diesel::upsert::excluded; -use log::{debug, info}; -use std::collections::HashMap; -use std::time::Instant; -use types::{EthSpec, SignedBeaconBlock}; - -pub use self::error::Error; -pub use self::models::{WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator}; -pub use self::watch_types::{WatchHash, WatchPK, WatchSlot}; - -// Clippy has false positives on these re-exports from Rust 1.75.0-beta.1. -#[allow(unused_imports)] -pub use crate::block_rewards::{ - get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, - get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, - WatchBlockRewards, -}; - -#[allow(unused_imports)] -pub use crate::block_packing::{ - get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, - get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, - WatchBlockPacking, -}; - -#[allow(unused_imports)] -pub use crate::suboptimal_attestations::{ - get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, - get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, - WatchAttestation, WatchSuboptimalAttestation, -}; - -#[allow(unused_imports)] -pub use crate::blockprint::{ - get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, - get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, - WatchBlockprint, -}; - -pub use config::Config; - -/// Batch inserts cannot exceed a certain size. -/// See https://github.com/diesel-rs/diesel/issues/2414. -/// For some reason, this seems to translate to 65535 / 5 (13107) records. -pub const MAX_SIZE_BATCH_INSERT: usize = 13107; - -pub type PgPool = Pool>; -pub type PgConn = PooledConnection>; - -/// Connect to a Postgresql database and build a connection pool. -pub fn build_connection_pool(config: &Config) -> Result { - let database_url = config.clone().build_database_url(); - info!("Building connection pool at: {database_url}"); - let pg = ConnectionManager::::new(&database_url); - Builder::new().build(pg).map_err(Error::Pool) -} - -/// Retrieve an idle connection from the pool. -pub fn get_connection(pool: &PgPool) -> Result { - pool.get().map_err(Error::Pool) -} - -/// Insert the active config into the database. This is used to check if the connected beacon node -/// is compatible with the database. These values will not change (except -/// `current_blockprint_checkpoint`). -pub fn insert_active_config( - conn: &mut PgConn, - new_config_name: String, - new_slots_per_epoch: u64, -) -> Result<(), Error> { - use self::active_config::dsl::*; - - diesel::insert_into(active_config) - .values(&vec![( - id.eq(1), - config_name.eq(new_config_name), - slots_per_epoch.eq(new_slots_per_epoch as i32), - )]) - .on_conflict_do_nothing() - .execute(conn)?; - - Ok(()) -} - -/// Get the active config from the database. 
-pub fn get_active_config(conn: &mut PgConn) -> Result, Error> { - use self::active_config::dsl::*; - Ok(active_config - .select((config_name, slots_per_epoch)) - .filter(id.eq(1)) - .first::<(String, i32)>(conn) - .optional()?) -} - -/* - * INSERT statements - */ - -/// Inserts a single row into the `canonical_slots` table. -/// If `new_slot.beacon_block` is `None`, the value in the row will be `null`. -/// -/// On a conflict, it will do nothing, leaving the old value. -pub fn insert_canonical_slot(conn: &mut PgConn, new_slot: WatchCanonicalSlot) -> Result<(), Error> { - diesel::insert_into(canonical_slots::table) - .values(&new_slot) - .on_conflict_do_nothing() - .execute(conn)?; - - debug!("Canonical slot inserted: {}", new_slot.slot); - Ok(()) -} - -pub fn insert_beacon_block( - conn: &mut PgConn, - block: SignedBeaconBlock, - root: WatchHash, -) -> Result<(), Error> { - use self::canonical_slots::dsl::{beacon_block, slot as canonical_slot}; - - let block_message = block.message(); - - // Pull out relevant values from the block. - let slot = WatchSlot::from_slot(block.slot()); - let parent_root = WatchHash::from_hash(block.parent_root()); - let proposer_index = block_message.proposer_index() as i32; - let graffiti = block_message.body().graffiti().as_utf8_lossy(); - let attestation_count = block_message.body().attestations_len() as i32; - - let full_payload = block_message.execution_payload().ok(); - - let transaction_count: Option = if let Some(bellatrix_payload) = - full_payload.and_then(|payload| payload.execution_payload_bellatrix().ok()) - { - Some(bellatrix_payload.transactions.len() as i32) - } else { - full_payload - .and_then(|payload| payload.execution_payload_capella().ok()) - .map(|payload| payload.transactions.len() as i32) - }; - - let withdrawal_count: Option = full_payload - .and_then(|payload| payload.execution_payload_capella().ok()) - .map(|payload| payload.withdrawals.len() as i32); - - let block_to_add = WatchBeaconBlock { - slot, - root, - parent_root, - attestation_count, - transaction_count, - withdrawal_count, - }; - - let proposer_info_to_add = WatchProposerInfo { - slot, - proposer_index, - graffiti, - }; - - // Update the canonical slots table. - diesel::update(canonical_slots::table) - .set(beacon_block.eq(root)) - .filter(canonical_slot.eq(slot)) - // Do not overwrite the value if it already exists. - .filter(beacon_block.is_null()) - .execute(conn)?; - - diesel::insert_into(beacon_blocks::table) - .values(block_to_add) - .on_conflict_do_nothing() - .execute(conn)?; - - diesel::insert_into(proposer_info::table) - .values(proposer_info_to_add) - .on_conflict_do_nothing() - .execute(conn)?; - - debug!("Beacon block inserted at slot: {slot}, root: {root}, parent: {parent_root}"); - Ok(()) -} - -/// Insert a validator into the `validators` table -/// -/// On a conflict, it will only overwrite `status`, `activation_epoch` and `exit_epoch`. -pub fn insert_validator(conn: &mut PgConn, validator: WatchValidator) -> Result<(), Error> { - use self::validators::dsl::*; - let new_index = validator.index; - let new_public_key = validator.public_key; - - diesel::insert_into(validators) - .values(validator) - .on_conflict(index) - .do_update() - .set(( - status.eq(excluded(status)), - activation_epoch.eq(excluded(activation_epoch)), - exit_epoch.eq(excluded(exit_epoch)), - )) - .execute(conn)?; - - debug!("Validator inserted, index: {new_index}, public_key: {new_public_key}"); - Ok(()) -} - -/// Insert a batch of values into the `validators` table. 
-/// -/// On a conflict, it will do nothing. -/// -/// Should not be used when updating validators. -/// Validators should be updated through the `insert_validator` function which contains the correct -/// `on_conflict` clauses. -pub fn insert_batch_validators( - conn: &mut PgConn, - all_validators: Vec, -) -> Result<(), Error> { - use self::validators::dsl::*; - - let mut count = 0; - - for chunk in all_validators.chunks(1000) { - count += diesel::insert_into(validators) - .values(chunk) - .on_conflict_do_nothing() - .execute(conn)?; - } - - debug!("Validators inserted, count: {count}"); - Ok(()) -} - -/* - * SELECT statements - */ - -/// Selects a single row of the `canonical_slots` table corresponding to a given `slot_query`. -pub fn get_canonical_slot( - conn: &mut PgConn, - slot_query: WatchSlot, -) -> Result, Error> { - use self::canonical_slots::dsl::*; - let timer = Instant::now(); - - let result = canonical_slots - .filter(slot.eq(slot_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row of the `canonical_slots` table corresponding to a given `root_query`. -/// Only returns the non-skipped slot which matches `root`. -pub fn get_canonical_slot_by_root( - conn: &mut PgConn, - root_query: WatchHash, -) -> Result, Error> { - use self::canonical_slots::dsl::*; - let timer = Instant::now(); - - let result = canonical_slots - .filter(root.eq(root_query)) - .filter(skipped.eq(false)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Canonical root requested: {root_query}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects `root` from a single row of the `canonical_slots` table corresponding to a given -/// `slot_query`. -#[allow(dead_code)] -pub fn get_root_at_slot( - conn: &mut PgConn, - slot_query: WatchSlot, -) -> Result, Error> { - use self::canonical_slots::dsl::*; - let timer = Instant::now(); - - let result = canonical_slots - .select(root) - .filter(slot.eq(slot_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value -/// of `slot`. -pub fn get_lowest_canonical_slot(conn: &mut PgConn) -> Result, Error> { - use self::canonical_slots::dsl::*; - let timer = Instant::now(); - - let result = canonical_slots - .order_by(slot.asc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Canonical slot requested: lowest, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value -/// of `slot` and where `skipped == false`. -pub fn get_lowest_non_skipped_canonical_slot( - conn: &mut PgConn, -) -> Result, Error> { - use self::canonical_slots::dsl::*; - let timer = Instant::now(); - - let result = canonical_slots - .filter(skipped.eq(false)) - .order_by(slot.asc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Canonical slot requested: lowest_non_skipped, time taken: {time_taken:?})"); - Ok(result) -} - -/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value -/// of `slot`. 
-pub fn get_highest_canonical_slot(conn: &mut PgConn) -> Result, Error> { - use self::canonical_slots::dsl::*; - let timer = Instant::now(); - - let result = canonical_slots - .order_by(slot.desc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Canonical slot requested: highest, time taken: {time_taken:?}"); - Ok(result) -} - -/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value -/// of `slot` and where `skipped == false`. -pub fn get_highest_non_skipped_canonical_slot( - conn: &mut PgConn, -) -> Result, Error> { - use self::canonical_slots::dsl::*; - let timer = Instant::now(); - - let result = canonical_slots - .filter(skipped.eq(false)) - .order_by(slot.desc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Canonical slot requested: highest_non_skipped, time taken: {time_taken:?}"); - Ok(result) -} - -/// Select all rows of the `canonical_slots` table where `slot >= `start_slot && slot <= -/// `end_slot`. -pub fn get_canonical_slots_by_range( - conn: &mut PgConn, - start_slot: WatchSlot, - end_slot: WatchSlot, -) -> Result>, Error> { - use self::canonical_slots::dsl::*; - let timer = Instant::now(); - - let result = canonical_slots - .filter(slot.ge(start_slot)) - .filter(slot.le(end_slot)) - .load::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!( - "Canonical slots by range requested, start_slot: {}, end_slot: {}, time_taken: {:?}", - start_slot.as_u64(), - end_slot.as_u64(), - time_taken - ); - Ok(result) -} - -/// Selects `root` from all rows of the `canonical_slots` table which have `beacon_block == null` -/// and `skipped == false` -pub fn get_unknown_canonical_blocks(conn: &mut PgConn) -> Result, Error> { - use self::canonical_slots::dsl::*; - - let result = canonical_slots - .select(root) - .filter(beacon_block.is_null()) - .filter(skipped.eq(false)) - .order_by(slot.desc()) - .load::(conn)?; - - Ok(result) -} - -/// Selects the row from the `beacon_blocks` table where `slot` is minimum. -pub fn get_lowest_beacon_block(conn: &mut PgConn) -> Result, Error> { - use self::beacon_blocks::dsl::*; - let timer = Instant::now(); - - let result = beacon_blocks - .order_by(slot.asc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Beacon block requested: lowest, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects the row from the `beacon_blocks` table where `slot` is maximum. -pub fn get_highest_beacon_block(conn: &mut PgConn) -> Result, Error> { - use self::beacon_blocks::dsl::*; - let timer = Instant::now(); - - let result = beacon_blocks - .order_by(slot.desc()) - .limit(1) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Beacon block requested: highest, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row from the `beacon_blocks` table corresponding to a given `root_query`. -pub fn get_beacon_block_by_root( - conn: &mut PgConn, - root_query: WatchHash, -) -> Result, Error> { - use self::beacon_blocks::dsl::*; - let timer = Instant::now(); - - let result = beacon_blocks - .filter(root.eq(root_query)) - .first::(conn) - .optional()?; - let time_taken = timer.elapsed(); - debug!("Beacon block requested: {root_query}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row from the `beacon_blocks` table corresponding to a given `slot_query`. 
-pub fn get_beacon_block_by_slot( - conn: &mut PgConn, - slot_query: WatchSlot, -) -> Result, Error> { - use self::beacon_blocks::dsl::*; - let timer = Instant::now(); - - let result = beacon_blocks - .filter(slot.eq(slot_query)) - .first::(conn) - .optional()?; - let time_taken = timer.elapsed(); - debug!("Beacon block requested: {slot_query}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects the row from the `beacon_blocks` table where `parent_root` equals the given `parent`. -/// This fetches the next block in the database. -/// -/// Will return `Ok(None)` if there are no matching blocks (e.g. the tip of the chain). -pub fn get_beacon_block_with_parent( - conn: &mut PgConn, - parent: WatchHash, -) -> Result, Error> { - use self::beacon_blocks::dsl::*; - let timer = Instant::now(); - - let result = beacon_blocks - .filter(parent_root.eq(parent)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Next beacon block requested: {parent}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Select all rows of the `beacon_blocks` table where `slot >= `start_slot && slot <= -/// `end_slot`. -pub fn get_beacon_blocks_by_range( - conn: &mut PgConn, - start_slot: WatchSlot, - end_slot: WatchSlot, -) -> Result>, Error> { - use self::beacon_blocks::dsl::*; - let timer = Instant::now(); - - let result = beacon_blocks - .filter(slot.ge(start_slot)) - .filter(slot.le(end_slot)) - .load::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Beacon blocks by range requested, start_slot: {start_slot}, end_slot: {end_slot}, time_taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row of the `proposer_info` table corresponding to a given `root_query`. -pub fn get_proposer_info_by_root( - conn: &mut PgConn, - root_query: WatchHash, -) -> Result, Error> { - use self::beacon_blocks::dsl::{beacon_blocks, root}; - use self::proposer_info::dsl::*; - let timer = Instant::now(); - - let join = beacon_blocks.inner_join(proposer_info); - - let result = join - .select((slot, proposer_index, graffiti)) - .filter(root.eq(root_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Proposer info requested for block: {root_query}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row of the `proposer_info` table corresponding to a given `slot_query`. -pub fn get_proposer_info_by_slot( - conn: &mut PgConn, - slot_query: WatchSlot, -) -> Result, Error> { - use self::proposer_info::dsl::*; - let timer = Instant::now(); - - let result = proposer_info - .filter(slot.eq(slot_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Proposer info requested for slot: {slot_query}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects multiple rows of the `proposer_info` table between `start_slot` and `end_slot`. -/// Selects a single row of the `proposer_info` table corresponding to a given `slot_query`. 
-#[allow(dead_code)] -pub fn get_proposer_info_by_range( - conn: &mut PgConn, - start_slot: WatchSlot, - end_slot: WatchSlot, -) -> Result>, Error> { - use self::proposer_info::dsl::*; - let timer = Instant::now(); - - let result = proposer_info - .filter(slot.ge(start_slot)) - .filter(slot.le(end_slot)) - .load::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!( - "Proposer info requested for range: {start_slot} to {end_slot}, time taken: {time_taken:?}" - ); - Ok(result) -} - -pub fn get_validators_latest_proposer_info( - conn: &mut PgConn, - indices_query: Vec, -) -> Result, Error> { - use self::proposer_info::dsl::*; - - let proposers = proposer_info - .filter(proposer_index.eq_any(indices_query)) - .load::(conn)?; - - let mut result = HashMap::new(); - for proposer in proposers { - result - .entry(proposer.proposer_index) - .or_insert_with(|| proposer.clone()); - let entry = result - .get_mut(&proposer.proposer_index) - .ok_or_else(|| Error::Other("An internal error occured".to_string()))?; - if proposer.slot > entry.slot { - entry.slot = proposer.slot - } - } - - Ok(result) -} - -/// Selects the max(`slot`) and `proposer_index` of each unique index in the -/// `proposer_info` table and returns them formatted as a `HashMap`. -/// Only returns rows which have `slot <= target_slot`. -/// -/// Ideally, this would return the full row, but I have not found a way to do that without using -/// a much more expensive SQL query. -pub fn get_all_validators_latest_proposer_info_at_slot( - conn: &mut PgConn, - target_slot: WatchSlot, -) -> Result, Error> { - use self::proposer_info::dsl::*; - - let latest_proposals: Vec<(i32, Option)> = proposer_info - .group_by(proposer_index) - .select((proposer_index, max(slot))) - .filter(slot.le(target_slot)) - .load::<(i32, Option)>(conn)?; - - let mut result = HashMap::new(); - - for proposal in latest_proposals { - if let Some(latest_slot) = proposal.1 { - result.insert(latest_slot, proposal.0); - } - } - - Ok(result) -} - -/// Selects a single row from the `validators` table corresponding to a given -/// `validator_index_query`. -pub fn get_validator_by_index( - conn: &mut PgConn, - validator_index_query: i32, -) -> Result, Error> { - use self::validators::dsl::*; - let timer = Instant::now(); - - let result = validators - .filter(index.eq(validator_index_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Validator requested: {validator_index_query}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row from the `validators` table corresponding to a given -/// `public_key_query`. -pub fn get_validator_by_public_key( - conn: &mut PgConn, - public_key_query: WatchPK, -) -> Result, Error> { - use self::validators::dsl::*; - let timer = Instant::now(); - - let result = validators - .filter(public_key.eq(public_key_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Validator requested: {public_key_query}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects all rows from the `validators` table which have an `index` contained in -/// the `indices_query`. 
-#[allow(dead_code)] -pub fn get_validators_by_indices( - conn: &mut PgConn, - indices_query: Vec, -) -> Result, Error> { - use self::validators::dsl::*; - let timer = Instant::now(); - - let query_len = indices_query.len(); - let result = validators - .filter(index.eq_any(indices_query)) - .load::(conn)?; - - let time_taken = timer.elapsed(); - debug!("{query_len} validators requested, time taken: {time_taken:?}"); - Ok(result) -} - -// Selects all rows from the `validators` table. -pub fn get_all_validators(conn: &mut PgConn) -> Result, Error> { - use self::validators::dsl::*; - let timer = Instant::now(); - - let result = validators.load::(conn)?; - - let time_taken = timer.elapsed(); - debug!("All validators requested, time taken: {time_taken:?}"); - Ok(result) -} - -/// Counts the number of rows in the `validators` table. -#[allow(dead_code)] -pub fn count_validators(conn: &mut PgConn) -> Result { - use self::validators::dsl::*; - - validators.count().get_result(conn).map_err(Error::Database) -} - -/// Counts the number of rows in the `validators` table where -/// `activation_epoch <= target_slot.epoch()`. -pub fn count_validators_activated_before_slot( - conn: &mut PgConn, - target_slot: WatchSlot, - slots_per_epoch: u64, -) -> Result { - use self::validators::dsl::*; - - let target_epoch = target_slot.epoch(slots_per_epoch); - - validators - .count() - .filter(activation_epoch.le(target_epoch.as_u64() as i32)) - .get_result(conn) - .map_err(Error::Database) -} - -/* - * DELETE statements. - */ - -/// Deletes all rows of the `canonical_slots` table which have `slot` greater than `slot_query`. -/// -/// Due to the ON DELETE CASCADE clause present in the database migration SQL, deleting rows from -/// `canonical_slots` will delete all corresponding rows in `beacon_blocks, `block_rewards`, -/// `block_packing` and `proposer_info`. -pub fn delete_canonical_slots_above( - conn: &mut PgConn, - slot_query: WatchSlot, -) -> Result { - use self::canonical_slots::dsl::*; - - let result = diesel::delete(canonical_slots) - .filter(slot.gt(slot_query)) - .execute(conn)?; - - debug!("Deleted canonical slots above {slot_query}: {result} rows deleted"); - Ok(result) -} - -/// Deletes all rows of the `suboptimal_attestations` table which have `epoch_start_slot` greater -/// than `epoch_start_slot_query`. 
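The activation count above leans on the usual slot-to-epoch arithmetic: an epoch is the slot integer-divided by `slots_per_epoch`, and a validator counts if its `activation_epoch` is at or below the epoch of the target slot. A minimal sketch of that predicate, using bare `u64` values rather than the `WatchSlot` and `Epoch` wrappers:

```rust
/// Epoch containing a slot: integer division by `slots_per_epoch` (32 on mainnet).
fn epoch_of_slot(slot: u64, slots_per_epoch: u64) -> u64 {
    slot / slots_per_epoch
}

/// Mirror of the deleted query's predicate: a validator counts as activated
/// before `target_slot` if `activation_epoch <= epoch_of_slot(target_slot)`.
fn is_activated_before(activation_epoch: u64, target_slot: u64, slots_per_epoch: u64) -> bool {
    activation_epoch <= epoch_of_slot(target_slot, slots_per_epoch)
}

fn main() {
    assert_eq!(epoch_of_slot(100, 32), 3);
    assert!(is_activated_before(3, 100, 32));
    assert!(!is_activated_before(4, 100, 32));
}
```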
-pub fn delete_suboptimal_attestations_above( - conn: &mut PgConn, - epoch_start_slot_query: WatchSlot, -) -> Result { - use self::suboptimal_attestations::dsl::*; - - let result = diesel::delete(suboptimal_attestations) - .filter(epoch_start_slot.gt(epoch_start_slot_query)) - .execute(conn)?; - - debug!("Deleted attestations above: {epoch_start_slot_query}, rows deleted: {result}"); - Ok(result) -} diff --git a/watch/src/database/models.rs b/watch/src/database/models.rs deleted file mode 100644 index f42444d6612..00000000000 --- a/watch/src/database/models.rs +++ /dev/null @@ -1,67 +0,0 @@ -use crate::database::{ - schema::{beacon_blocks, canonical_slots, proposer_info, validators}, - watch_types::{WatchHash, WatchPK, WatchSlot}, -}; -use diesel::{Insertable, Queryable}; -use serde::{Deserialize, Serialize}; -use std::hash::{Hash, Hasher}; - -pub type WatchEpoch = i32; - -#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] -#[diesel(table_name = canonical_slots)] -pub struct WatchCanonicalSlot { - pub slot: WatchSlot, - pub root: WatchHash, - pub skipped: bool, - pub beacon_block: Option, -} - -#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] -#[diesel(table_name = beacon_blocks)] -pub struct WatchBeaconBlock { - pub slot: WatchSlot, - pub root: WatchHash, - pub parent_root: WatchHash, - pub attestation_count: i32, - pub transaction_count: Option, - pub withdrawal_count: Option, -} - -#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] -#[diesel(table_name = validators)] -pub struct WatchValidator { - pub index: i32, - pub public_key: WatchPK, - pub status: String, - pub activation_epoch: Option, - pub exit_epoch: Option, -} - -// Implement a minimal version of `Hash` and `Eq` so that we know if a validator status has changed. -impl Hash for WatchValidator { - fn hash(&self, state: &mut H) { - self.index.hash(state); - self.status.hash(state); - self.activation_epoch.hash(state); - self.exit_epoch.hash(state); - } -} - -impl PartialEq for WatchValidator { - fn eq(&self, other: &Self) -> bool { - self.index == other.index - && self.status == other.status - && self.activation_epoch == other.activation_epoch - && self.exit_epoch == other.exit_epoch - } -} -impl Eq for WatchValidator {} - -#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] -#[diesel(table_name = proposer_info)] -pub struct WatchProposerInfo { - pub slot: WatchSlot, - pub proposer_index: i32, - pub graffiti: String, -} diff --git a/watch/src/database/schema.rs b/watch/src/database/schema.rs deleted file mode 100644 index 32f22d506db..00000000000 --- a/watch/src/database/schema.rs +++ /dev/null @@ -1,102 +0,0 @@ -// @generated automatically by Diesel CLI. - -diesel::table! { - active_config (id) { - id -> Int4, - config_name -> Text, - slots_per_epoch -> Int4, - } -} - -diesel::table! { - beacon_blocks (slot) { - slot -> Int4, - root -> Bytea, - parent_root -> Bytea, - attestation_count -> Int4, - transaction_count -> Nullable, - withdrawal_count -> Nullable, - } -} - -diesel::table! { - block_packing (slot) { - slot -> Int4, - available -> Int4, - included -> Int4, - prior_skip_slots -> Int4, - } -} - -diesel::table! { - block_rewards (slot) { - slot -> Int4, - total -> Int4, - attestation_reward -> Int4, - sync_committee_reward -> Int4, - } -} - -diesel::table! { - blockprint (slot) { - slot -> Int4, - best_guess -> Text, - } -} - -diesel::table! 
{ - canonical_slots (slot) { - slot -> Int4, - root -> Bytea, - skipped -> Bool, - beacon_block -> Nullable, - } -} - -diesel::table! { - proposer_info (slot) { - slot -> Int4, - proposer_index -> Int4, - graffiti -> Text, - } -} - -diesel::table! { - suboptimal_attestations (epoch_start_slot, index) { - epoch_start_slot -> Int4, - index -> Int4, - source -> Bool, - head -> Bool, - target -> Bool, - } -} - -diesel::table! { - validators (index) { - index -> Int4, - public_key -> Bytea, - status -> Text, - activation_epoch -> Nullable, - exit_epoch -> Nullable, - } -} - -diesel::joinable!(block_packing -> beacon_blocks (slot)); -diesel::joinable!(block_rewards -> beacon_blocks (slot)); -diesel::joinable!(blockprint -> beacon_blocks (slot)); -diesel::joinable!(proposer_info -> beacon_blocks (slot)); -diesel::joinable!(proposer_info -> validators (proposer_index)); -diesel::joinable!(suboptimal_attestations -> canonical_slots (epoch_start_slot)); -diesel::joinable!(suboptimal_attestations -> validators (index)); - -diesel::allow_tables_to_appear_in_same_query!( - active_config, - beacon_blocks, - block_packing, - block_rewards, - blockprint, - canonical_slots, - proposer_info, - suboptimal_attestations, - validators, -); diff --git a/watch/src/database/utils.rs b/watch/src/database/utils.rs deleted file mode 100644 index 9134c3698f6..00000000000 --- a/watch/src/database/utils.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![allow(dead_code)] -use crate::database::config::Config; -use diesel::prelude::*; -use diesel_migrations::{FileBasedMigrations, MigrationHarness}; - -/// Sets `config.dbname` to `config.default_dbname` and returns `(new_config, old_dbname)`. -/// -/// This is useful for creating or dropping databases, since these actions must be done by -/// logging into another database. -pub fn get_config_using_default_db(config: &Config) -> (Config, String) { - let mut config = config.clone(); - let new_dbname = std::mem::replace(&mut config.dbname, config.default_dbname.clone()); - (config, new_dbname) -} - -/// Runs the set of migrations as detected in the local directory. -/// Equivalent to `diesel migration run`. -/// -/// Contains `unwrap`s so is only suitable for test code. 
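`get_config_using_default_db` above exists so that administrative statements such as `CREATE DATABASE` can be issued while connected to the default database. A hedged sketch of that usage, assuming diesel with the `postgres` feature; the cut-down `Config` struct and the `base_url` parameter are placeholders for illustration, not the removed types:

```rust
use diesel::prelude::*;
use diesel::sql_query;

/// Minimal stand-in for the deleted `Config`: only the fields the helper touches.
#[derive(Clone)]
struct Config {
    dbname: String,
    default_dbname: String,
}

/// Same shape as the deleted helper: point the config at the default database and
/// return the name of the database we actually want to act on.
fn get_config_using_default_db(config: &Config) -> (Config, String) {
    let mut config = config.clone();
    let new_dbname = std::mem::replace(&mut config.dbname, config.default_dbname.clone());
    (config, new_dbname)
}

/// Create the target database by logging into the default one first.
/// `base_url` is assumed to look like "postgres://user:pass@localhost:5432".
fn create_database(config: &Config, base_url: &str) -> diesel::QueryResult<usize> {
    let (admin_config, dbname) = get_config_using_default_db(config);
    let mut conn = PgConnection::establish(&format!("{base_url}/{}", admin_config.dbname))
        .expect("could not connect to the default database");
    sql_query(format!("CREATE DATABASE {dbname}")).execute(&mut conn)
}
```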
-/// TODO(mac) refactor to return Result -pub fn run_migrations(config: &Config) -> PgConnection { - let database_url = config.clone().build_database_url(); - let mut conn = PgConnection::establish(&database_url).unwrap(); - let migrations = FileBasedMigrations::find_migrations_directory().unwrap(); - conn.run_pending_migrations(migrations).unwrap(); - conn.begin_test_transaction().unwrap(); - conn -} diff --git a/watch/src/database/watch_types.rs b/watch/src/database/watch_types.rs deleted file mode 100644 index c2b67084c94..00000000000 --- a/watch/src/database/watch_types.rs +++ /dev/null @@ -1,119 +0,0 @@ -use crate::database::error::Error; -use diesel::{ - sql_types::{Binary, Integer}, - AsExpression, FromSqlRow, -}; -use serde::{Deserialize, Serialize}; -use std::fmt; -use std::str::FromStr; -use types::{Epoch, Hash256, PublicKeyBytes, Slot}; -#[derive( - Clone, - Copy, - Debug, - AsExpression, - FromSqlRow, - Deserialize, - Serialize, - Hash, - PartialEq, - Eq, - PartialOrd, - Ord, -)] -#[diesel(sql_type = Integer)] -pub struct WatchSlot(Slot); - -impl fmt::Display for WatchSlot { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl WatchSlot { - pub fn new(slot: u64) -> Self { - Self(Slot::new(slot)) - } - - pub fn from_slot(slot: Slot) -> Self { - Self(slot) - } - - pub fn as_slot(self) -> Slot { - self.0 - } - - pub fn as_u64(self) -> u64 { - self.0.as_u64() - } - - pub fn epoch(self, slots_per_epoch: u64) -> Epoch { - self.as_slot().epoch(slots_per_epoch) - } -} - -#[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Deserialize, Serialize)] -#[diesel(sql_type = Binary)] -pub struct WatchHash(Hash256); - -impl fmt::Display for WatchHash { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self.0) - } -} - -impl WatchHash { - pub fn as_hash(&self) -> Hash256 { - self.0 - } - - pub fn from_hash(hash: Hash256) -> Self { - WatchHash(hash) - } - - pub fn as_bytes(&self) -> &[u8] { - self.0.as_slice() - } - - pub fn from_bytes(src: &[u8]) -> Result { - if src.len() == 32 { - Ok(WatchHash(Hash256::from_slice(src))) - } else { - Err(Error::InvalidRoot) - } - } -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq, AsExpression, FromSqlRow, Serialize, Deserialize)] -#[diesel(sql_type = Binary)] -pub struct WatchPK(PublicKeyBytes); - -impl fmt::Display for WatchPK { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self.0) - } -} - -impl WatchPK { - pub fn as_bytes(&self) -> &[u8] { - self.0.as_serialized() - } - - pub fn from_bytes(src: &[u8]) -> Result { - Ok(WatchPK(PublicKeyBytes::deserialize(src)?)) - } - - pub fn from_pubkey(key: PublicKeyBytes) -> Self { - WatchPK(key) - } -} - -impl FromStr for WatchPK { - type Err = String; - - fn from_str(s: &str) -> Result { - Ok(WatchPK( - PublicKeyBytes::from_str(s).map_err(|e| format!("Cannot be parsed: {}", e))?, - )) - } -} diff --git a/watch/src/lib.rs b/watch/src/lib.rs deleted file mode 100644 index 664c9451655..00000000000 --- a/watch/src/lib.rs +++ /dev/null @@ -1,12 +0,0 @@ -#![cfg(unix)] -pub mod block_packing; -pub mod block_rewards; -pub mod blockprint; -pub mod cli; -pub mod client; -pub mod config; -pub mod database; -pub mod logger; -pub mod server; -pub mod suboptimal_attestations; -pub mod updater; diff --git a/watch/src/logger.rs b/watch/src/logger.rs deleted file mode 100644 index 49310b42aae..00000000000 --- a/watch/src/logger.rs +++ /dev/null @@ -1,24 +0,0 @@ -use env_logger::Builder; -use log::{info, LevelFilter}; 
-use std::process; - -pub fn init_logger(log_level: &str) { - let log_level = match log_level.to_lowercase().as_str() { - "trace" => LevelFilter::Trace, - "debug" => LevelFilter::Debug, - "info" => LevelFilter::Info, - "warn" => LevelFilter::Warn, - "error" => LevelFilter::Error, - _ => { - eprintln!("Unsupported log level"); - process::exit(1) - } - }; - - let mut builder = Builder::new(); - builder.filter(Some("watch"), log_level); - - builder.init(); - - info!("Logger initialized with log-level: {log_level}"); -} diff --git a/watch/src/main.rs b/watch/src/main.rs deleted file mode 100644 index f971747da42..00000000000 --- a/watch/src/main.rs +++ /dev/null @@ -1,41 +0,0 @@ -#[cfg(unix)] -use std::process; - -#[cfg(unix)] -mod block_packing; -#[cfg(unix)] -mod block_rewards; -#[cfg(unix)] -mod blockprint; -#[cfg(unix)] -mod cli; -#[cfg(unix)] -mod config; -#[cfg(unix)] -mod database; -#[cfg(unix)] -mod logger; -#[cfg(unix)] -mod server; -#[cfg(unix)] -mod suboptimal_attestations; -#[cfg(unix)] -mod updater; - -#[cfg(unix)] -#[tokio::main] -async fn main() { - match cli::run().await { - Ok(()) => process::exit(0), - Err(e) => { - eprintln!("Command failed with: {}", e); - drop(e); - process::exit(1) - } - } -} - -#[cfg(windows)] -fn main() { - eprintln!("Windows is not supported. Exiting."); -} diff --git a/watch/src/server/config.rs b/watch/src/server/config.rs deleted file mode 100644 index a7d38e706f8..00000000000 --- a/watch/src/server/config.rs +++ /dev/null @@ -1,28 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::net::IpAddr; - -pub const LISTEN_ADDR: &str = "127.0.0.1"; - -pub const fn listen_port() -> u16 { - 5059 -} -fn listen_addr() -> IpAddr { - LISTEN_ADDR.parse().expect("Server address is not valid") -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - #[serde(default = "listen_addr")] - pub listen_addr: IpAddr, - #[serde(default = "listen_port")] - pub listen_port: u16, -} - -impl Default for Config { - fn default() -> Self { - Self { - listen_addr: listen_addr(), - listen_port: listen_port(), - } - } -} diff --git a/watch/src/server/error.rs b/watch/src/server/error.rs deleted file mode 100644 index e2c8f0f42ac..00000000000 --- a/watch/src/server/error.rs +++ /dev/null @@ -1,59 +0,0 @@ -use crate::database::Error as DbError; -use axum::Error as AxumError; -use axum::{http::StatusCode, response::IntoResponse, Json}; -use hyper::Error as HyperError; -use serde_json::json; -use std::io::Error as IoError; - -#[derive(Debug)] -#[allow(dead_code)] -pub enum Error { - Axum(AxumError), - Hyper(HyperError), - Database(DbError), - IoError(IoError), - BadRequest, - NotFound, - Other(String), -} - -impl IntoResponse for Error { - fn into_response(self) -> axum::response::Response { - let (status, error_message) = match self { - Self::BadRequest => (StatusCode::BAD_REQUEST, "Bad Request"), - Self::NotFound => (StatusCode::NOT_FOUND, "Not Found"), - _ => (StatusCode::INTERNAL_SERVER_ERROR, "Internal Server Error"), - }; - (status, Json(json!({ "error": error_message }))).into_response() - } -} - -impl From for Error { - fn from(e: HyperError) -> Self { - Error::Hyper(e) - } -} - -impl From for Error { - fn from(e: AxumError) -> Self { - Error::Axum(e) - } -} - -impl From for Error { - fn from(e: DbError) -> Self { - Error::Database(e) - } -} - -impl From for Error { - fn from(e: IoError) -> Self { - Error::IoError(e) - } -} - -impl From for Error { - fn from(e: String) -> Self { - Error::Other(e) - } -} diff --git a/watch/src/server/handler.rs 
b/watch/src/server/handler.rs deleted file mode 100644 index 6777026867e..00000000000 --- a/watch/src/server/handler.rs +++ /dev/null @@ -1,266 +0,0 @@ -use crate::database::{ - self, Error as DbError, PgPool, WatchBeaconBlock, WatchCanonicalSlot, WatchHash, WatchPK, - WatchProposerInfo, WatchSlot, WatchValidator, -}; -use crate::server::Error; -use axum::{ - extract::{Path, Query}, - Extension, Json, -}; -use eth2::types::BlockId; -use std::collections::HashMap; -use std::str::FromStr; - -pub async fn get_slot( - Path(slot): Path, - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - Ok(Json(database::get_canonical_slot( - &mut conn, - WatchSlot::new(slot), - )?)) -} - -pub async fn get_slot_lowest( - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - Ok(Json(database::get_lowest_canonical_slot(&mut conn)?)) -} - -pub async fn get_slot_highest( - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - Ok(Json(database::get_highest_canonical_slot(&mut conn)?)) -} - -pub async fn get_slots_by_range( - Query(query): Query>, - Extension(pool): Extension, -) -> Result>>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - if let Some(start_slot) = query.get("start_slot") { - if let Some(end_slot) = query.get("end_slot") { - if start_slot > end_slot { - Err(Error::BadRequest) - } else { - Ok(Json(database::get_canonical_slots_by_range( - &mut conn, - WatchSlot::new(*start_slot), - WatchSlot::new(*end_slot), - )?)) - } - } else { - Err(Error::BadRequest) - } - } else { - Err(Error::BadRequest) - } -} - -pub async fn get_block( - Path(block_query): Path, - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - let block_id: BlockId = BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)?; - match block_id { - BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( - &mut conn, - WatchSlot::from_slot(slot), - )?)), - BlockId::Root(root) => Ok(Json(database::get_beacon_block_by_root( - &mut conn, - WatchHash::from_hash(root), - )?)), - _ => Err(Error::BadRequest), - } -} - -pub async fn get_block_lowest( - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - Ok(Json(database::get_lowest_beacon_block(&mut conn)?)) -} - -pub async fn get_block_highest( - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - Ok(Json(database::get_highest_beacon_block(&mut conn)?)) -} - -pub async fn get_block_previous( - Path(block_query): Path, - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { - BlockId::Root(root) => { - if let Some(block) = - database::get_beacon_block_by_root(&mut conn, WatchHash::from_hash(root))? 
- .map(|block| block.parent_root) - { - Ok(Json(database::get_beacon_block_by_root(&mut conn, block)?)) - } else { - Err(Error::NotFound) - } - } - BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( - &mut conn, - WatchSlot::new(slot.as_u64().checked_sub(1_u64).ok_or(Error::NotFound)?), - )?)), - _ => Err(Error::BadRequest), - } -} - -pub async fn get_block_next( - Path(block_query): Path, - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { - BlockId::Root(root) => Ok(Json(database::get_beacon_block_with_parent( - &mut conn, - WatchHash::from_hash(root), - )?)), - BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( - &mut conn, - WatchSlot::from_slot(slot + 1_u64), - )?)), - _ => Err(Error::BadRequest), - } -} - -pub async fn get_blocks_by_range( - Query(query): Query>, - Extension(pool): Extension, -) -> Result>>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - if let Some(start_slot) = query.get("start_slot") { - if let Some(end_slot) = query.get("end_slot") { - if start_slot > end_slot { - Err(Error::BadRequest) - } else { - Ok(Json(database::get_beacon_blocks_by_range( - &mut conn, - WatchSlot::new(*start_slot), - WatchSlot::new(*end_slot), - )?)) - } - } else { - Err(Error::BadRequest) - } - } else { - Err(Error::BadRequest) - } -} - -pub async fn get_block_proposer( - Path(block_query): Path, - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { - BlockId::Root(root) => Ok(Json(database::get_proposer_info_by_root( - &mut conn, - WatchHash::from_hash(root), - )?)), - BlockId::Slot(slot) => Ok(Json(database::get_proposer_info_by_slot( - &mut conn, - WatchSlot::from_slot(slot), - )?)), - _ => Err(Error::BadRequest), - } -} - -pub async fn get_validator( - Path(validator_query): Path, - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - if validator_query.starts_with("0x") { - let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; - Ok(Json(database::get_validator_by_public_key( - &mut conn, pubkey, - )?)) - } else { - let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; - Ok(Json(database::get_validator_by_index(&mut conn, index)?)) - } -} - -pub async fn get_all_validators( - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - Ok(Json(database::get_all_validators(&mut conn)?)) -} - -pub async fn get_validator_latest_proposal( - Path(validator_query): Path, - Extension(pool): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - if validator_query.starts_with("0x") { - let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; - let validator = - database::get_validator_by_public_key(&mut conn, pubkey)?.ok_or(Error::NotFound)?; - Ok(Json(database::get_validators_latest_proposer_info( - &mut conn, - vec![validator.index], - )?)) - } else { - let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; - Ok(Json(database::get_validators_latest_proposer_info( - &mut conn, - vec![index], - )?)) - } -} - -pub async fn 
get_client_breakdown( - Extension(pool): Extension, - Extension(slots_per_epoch): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - - if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { - Ok(Json(database::get_validators_clients_at_slot( - &mut conn, - target_slot.slot, - slots_per_epoch, - )?)) - } else { - Err(Error::Database(DbError::Other( - "No slots found in database.".to_string(), - ))) - } -} - -pub async fn get_client_breakdown_percentages( - Extension(pool): Extension, - Extension(slots_per_epoch): Extension, -) -> Result>, Error> { - let mut conn = database::get_connection(&pool).map_err(Error::Database)?; - - let mut result = HashMap::new(); - if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { - let total = database::count_validators_activated_before_slot( - &mut conn, - target_slot.slot, - slots_per_epoch, - )?; - let clients = - database::get_validators_clients_at_slot(&mut conn, target_slot.slot, slots_per_epoch)?; - for (client, number) in clients.iter() { - let percentage: f64 = *number as f64 / total as f64 * 100.0; - result.insert(client.to_string(), percentage); - } - } - - Ok(Json(result)) -} diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs deleted file mode 100644 index 08036db9510..00000000000 --- a/watch/src/server/mod.rs +++ /dev/null @@ -1,136 +0,0 @@ -use crate::block_packing::block_packing_routes; -use crate::block_rewards::block_rewards_routes; -use crate::blockprint::blockprint_routes; -use crate::config::Config as FullConfig; -use crate::database::{self, PgPool}; -use crate::suboptimal_attestations::{attestation_routes, blockprint_attestation_routes}; -use axum::{ - http::{StatusCode, Uri}, - routing::get, - Extension, Json, Router, -}; -use eth2::types::ErrorMessage; -use log::info; -use std::future::{Future, IntoFuture}; -use std::net::{SocketAddr, TcpListener}; - -pub use config::Config; -pub use error::Error; - -mod config; -mod error; -mod handler; - -pub async fn serve(config: FullConfig) -> Result<(), Error> { - let db = database::build_connection_pool(&config.database)?; - let (_, slots_per_epoch) = database::get_active_config(&mut database::get_connection(&db)?)? - .ok_or_else(|| { - Error::Other( - "Database not found. Please run the updater prior to starting the server" - .to_string(), - ) - })?; - - let (_addr, server) = start_server(&config, slots_per_epoch as u64, db)?; - - server.await?; - - Ok(()) -} - -/// Creates a server that will serve requests using information from `config`. -/// -/// The server will create its own connection pool to serve connections to the database. -/// This is separate to the connection pool that is used for the `updater`. -/// -/// The server will shut down gracefully when the `shutdown` future resolves. -/// -/// ## Returns -/// -/// This function will bind the server to the address specified in the config and then return a -/// Future representing the actual server that will need to be awaited. -/// -/// ## Errors -/// -/// Returns an error if the server is unable to bind or there is another error during -/// configuration. 
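The client-breakdown endpoint above derives each client's share as `count / total * 100`. A tiny sketch of that calculation with the zero-denominator guard the later per-client endpoints apply, so an empty validator set yields `0.0` rather than `inf` or `NaN`:

```rust
/// Percentage helper mirroring the breakdown endpoints: a zero denominator
/// yields 0.0 instead of `inf` or `NaN`.
fn percentage(count: u64, total: u64) -> f64 {
    if total == 0 {
        0.0
    } else {
        count as f64 / total as f64 * 100.0
    }
}

fn main() {
    assert_eq!(percentage(25, 100), 25.0);
    assert_eq!(percentage(3, 0), 0.0);
}
```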
-pub fn start_server( - config: &FullConfig, - slots_per_epoch: u64, - pool: PgPool, -) -> Result< - ( - SocketAddr, - impl Future> + 'static, - ), - Error, -> { - let mut routes = Router::new() - .route("/v1/slots", get(handler::get_slots_by_range)) - .route("/v1/slots/:slot", get(handler::get_slot)) - .route("/v1/slots/lowest", get(handler::get_slot_lowest)) - .route("/v1/slots/highest", get(handler::get_slot_highest)) - .route("/v1/slots/:slot/block", get(handler::get_block)) - .route("/v1/blocks", get(handler::get_blocks_by_range)) - .route("/v1/blocks/:block", get(handler::get_block)) - .route("/v1/blocks/lowest", get(handler::get_block_lowest)) - .route("/v1/blocks/highest", get(handler::get_block_highest)) - .route( - "/v1/blocks/:block/previous", - get(handler::get_block_previous), - ) - .route("/v1/blocks/:block/next", get(handler::get_block_next)) - .route( - "/v1/blocks/:block/proposer", - get(handler::get_block_proposer), - ) - .route("/v1/validators/:validator", get(handler::get_validator)) - .route("/v1/validators/all", get(handler::get_all_validators)) - .route( - "/v1/validators/:validator/latest_proposal", - get(handler::get_validator_latest_proposal), - ) - .route("/v1/clients", get(handler::get_client_breakdown)) - .route( - "/v1/clients/percentages", - get(handler::get_client_breakdown_percentages), - ) - .merge(attestation_routes()) - .merge(blockprint_routes()) - .merge(block_packing_routes()) - .merge(block_rewards_routes()); - - if config.blockprint.enabled && config.updater.attestations { - routes = routes.merge(blockprint_attestation_routes()) - } - - let app = routes - .fallback(route_not_found) - .layer(Extension(pool)) - .layer(Extension(slots_per_epoch)); - - let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port); - let listener = TcpListener::bind(addr)?; - listener.set_nonblocking(true)?; - - // Read the socket address (it may be different from `addr` if listening on port 0). - let socket_addr = listener.local_addr()?; - - let serve = axum::serve(tokio::net::TcpListener::from_std(listener)?, app); - - info!("HTTP server listening on {}", addr); - - Ok((socket_addr, serve.into_future())) -} - -// The default route indicating that no available routes matched the request. 
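`start_server` above follows the usual axum 0.7 pattern: build a `Router`, share state via `Extension` layers, bind a non-blocking std listener (port 0 lets the OS pick a free port), then hand it to `axum::serve`. A minimal, self-contained sketch of that pattern, assuming `axum` 0.7 and `tokio` with the `macros` and `rt-multi-thread` features; the `/v1/health` route and the `32` slots-per-epoch value are illustrative only:

```rust
use axum::{routing::get, Extension, Router};
use std::net::{SocketAddr, TcpListener};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // A single route with a shared value injected via `Extension`,
    // mirroring how the removed server shared `PgPool` and `slots_per_epoch`.
    let app = Router::new()
        .route(
            "/v1/health",
            get(|Extension(slots): Extension<u64>| async move {
                format!("slots_per_epoch: {slots}")
            }),
        )
        .layer(Extension(32u64));

    // Bind to port 0 so the OS picks a free port, then read the real address back,
    // as the removed `start_server` did via `local_addr`.
    let addr: SocketAddr = "127.0.0.1:0".parse()?;
    let listener = TcpListener::bind(addr)?;
    listener.set_nonblocking(true)?;
    println!("listening on {}", listener.local_addr()?);

    axum::serve(tokio::net::TcpListener::from_std(listener)?, app).await?;
    Ok(())
}
```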
-async fn route_not_found(uri: Uri) -> (StatusCode, Json<ErrorMessage>) {
-    (
-        StatusCode::METHOD_NOT_ALLOWED,
-        Json(ErrorMessage {
-            code: StatusCode::METHOD_NOT_ALLOWED.as_u16(),
-            message: format!("No route for {uri}"),
-            stacktraces: vec![],
-        }),
-    )
-}
diff --git a/watch/src/suboptimal_attestations/database.rs b/watch/src/suboptimal_attestations/database.rs
deleted file mode 100644
index cb947d250a2..00000000000
--- a/watch/src/suboptimal_attestations/database.rs
+++ /dev/null
@@ -1,224 +0,0 @@
-use crate::database::{
-    schema::{suboptimal_attestations, validators},
-    watch_types::{WatchPK, WatchSlot},
-    Error, PgConn, MAX_SIZE_BATCH_INSERT,
-};
-
-use diesel::prelude::*;
-use diesel::{Insertable, Queryable};
-use log::debug;
-use serde::{Deserialize, Serialize};
-use std::time::Instant;
-
-use types::Epoch;
-
-#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
-pub struct WatchAttestation {
-    pub index: i32,
-    pub epoch: Epoch,
-    pub source: bool,
-    pub head: bool,
-    pub target: bool,
-}
-
-impl WatchAttestation {
-    pub fn optimal(index: i32, epoch: Epoch) -> WatchAttestation {
-        WatchAttestation {
-            index,
-            epoch,
-            source: true,
-            head: true,
-            target: true,
-        }
-    }
-}
-
-#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)]
-#[diesel(table_name = suboptimal_attestations)]
-pub struct WatchSuboptimalAttestation {
-    pub epoch_start_slot: WatchSlot,
-    pub index: i32,
-    pub source: bool,
-    pub head: bool,
-    pub target: bool,
-}
-
-impl WatchSuboptimalAttestation {
-    pub fn to_attestation(&self, slots_per_epoch: u64) -> WatchAttestation {
-        WatchAttestation {
-            index: self.index,
-            epoch: self.epoch_start_slot.epoch(slots_per_epoch),
-            source: self.source,
-            head: self.head,
-            target: self.target,
-        }
-    }
-}
-
-/// Inserts a batch of values into the `suboptimal_attestations` table.
-///
-/// Attestations technically occur per-slot, but we only store them per-epoch (via its
-/// `start_slot`), so if any slot in the epoch changes we need to resync the whole epoch, as a
-/// 'suboptimal' attestation could now be 'optimal'.
-///
-/// This is handled in the update code, where in the case of a re-org, the affected epoch is
-/// deleted completely.
-///
-/// On a conflict, it will do nothing.
-pub fn insert_batch_suboptimal_attestations(
-    conn: &mut PgConn,
-    attestations: Vec<WatchSuboptimalAttestation>,
-) -> Result<(), Error> {
-    use self::suboptimal_attestations::dsl::*;
-
-    let mut count = 0;
-    let timer = Instant::now();
-
-    for chunk in attestations.chunks(MAX_SIZE_BATCH_INSERT) {
-        count += diesel::insert_into(suboptimal_attestations)
-            .values(chunk)
-            .on_conflict_do_nothing()
-            .execute(conn)?;
-    }
-
-    let time_taken = timer.elapsed();
-    debug!("Attestations inserted, count: {count}, time taken: {time_taken:?}");
-    Ok(())
-}
-
-/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is minimum.
-pub fn get_lowest_attestation(
-    conn: &mut PgConn,
-) -> Result<Option<WatchSuboptimalAttestation>, Error> {
-    use self::suboptimal_attestations::dsl::*;
-
-    Ok(suboptimal_attestations
-        .order_by(epoch_start_slot.asc())
-        .limit(1)
-        .first::<WatchSuboptimalAttestation>(conn)
-        .optional()?)
-}
-
-/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is maximum.
-pub fn get_highest_attestation(
-    conn: &mut PgConn,
-) -> Result<Option<WatchSuboptimalAttestation>, Error> {
-    use self::suboptimal_attestations::dsl::*;
-
-    Ok(suboptimal_attestations
-        .order_by(epoch_start_slot.desc())
-        .limit(1)
-        .first::<WatchSuboptimalAttestation>(conn)
-        .optional()?)
-} - -/// Selects a single row from the `suboptimal_attestations` table corresponding to a given -/// `index_query` and `epoch_query`. -pub fn get_attestation_by_index( - conn: &mut PgConn, - index_query: i32, - epoch_query: Epoch, - slots_per_epoch: u64, -) -> Result, Error> { - use self::suboptimal_attestations::dsl::*; - let timer = Instant::now(); - - let result = suboptimal_attestations - .filter(epoch_start_slot.eq(WatchSlot::from_slot( - epoch_query.start_slot(slots_per_epoch), - ))) - .filter(index.eq(index_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Attestation requested for validator: {index_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects a single row from the `suboptimal_attestations` table corresponding -/// to a given `pubkey_query` and `epoch_query`. -#[allow(dead_code)] -pub fn get_attestation_by_pubkey( - conn: &mut PgConn, - pubkey_query: WatchPK, - epoch_query: Epoch, - slots_per_epoch: u64, -) -> Result, Error> { - use self::suboptimal_attestations::dsl::*; - use self::validators::dsl::{public_key, validators}; - let timer = Instant::now(); - - let join = validators.inner_join(suboptimal_attestations); - - let result = join - .select((epoch_start_slot, index, source, head, target)) - .filter(epoch_start_slot.eq(WatchSlot::from_slot( - epoch_query.start_slot(slots_per_epoch), - ))) - .filter(public_key.eq(pubkey_query)) - .first::(conn) - .optional()?; - - let time_taken = timer.elapsed(); - debug!("Attestation requested for validator: {pubkey_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); - Ok(result) -} - -/// Selects `index` for all validators in the suboptimal_attestations table -/// that have `source == false` for the corresponding `epoch_start_slot_query`. -pub fn get_validators_missed_source( - conn: &mut PgConn, - epoch_start_slot_query: WatchSlot, -) -> Result, Error> { - use self::suboptimal_attestations::dsl::*; - - Ok(suboptimal_attestations - .select(index) - .filter(epoch_start_slot.eq(epoch_start_slot_query)) - .filter(source.eq(false)) - .load::(conn)?) -} - -/// Selects `index` for all validators in the suboptimal_attestations table -/// that have `head == false` for the corresponding `epoch_start_slot_query`. -pub fn get_validators_missed_head( - conn: &mut PgConn, - epoch_start_slot_query: WatchSlot, -) -> Result, Error> { - use self::suboptimal_attestations::dsl::*; - - Ok(suboptimal_attestations - .select(index) - .filter(epoch_start_slot.eq(epoch_start_slot_query)) - .filter(head.eq(false)) - .load::(conn)?) -} - -/// Selects `index` for all validators in the suboptimal_attestations table -/// that have `target == false` for the corresponding `epoch_start_slot_query`. -pub fn get_validators_missed_target( - conn: &mut PgConn, - epoch_start_slot_query: WatchSlot, -) -> Result, Error> { - use self::suboptimal_attestations::dsl::*; - - Ok(suboptimal_attestations - .select(index) - .filter(epoch_start_slot.eq(epoch_start_slot_query)) - .filter(target.eq(false)) - .load::(conn)?) -} - -/// Selects all rows from the `suboptimal_attestations` table for the given -/// `epoch_start_slot_query`. -pub fn get_all_suboptimal_attestations_for_epoch( - conn: &mut PgConn, - epoch_start_slot_query: WatchSlot, -) -> Result, Error> { - use self::suboptimal_attestations::dsl::*; - - Ok(suboptimal_attestations - .filter(epoch_start_slot.eq(epoch_start_slot_query)) - .load::(conn)?) 
-} diff --git a/watch/src/suboptimal_attestations/mod.rs b/watch/src/suboptimal_attestations/mod.rs deleted file mode 100644 index a94532e8ab2..00000000000 --- a/watch/src/suboptimal_attestations/mod.rs +++ /dev/null @@ -1,56 +0,0 @@ -pub mod database; -pub mod server; -pub mod updater; - -use crate::database::watch_types::WatchSlot; -use crate::updater::error::Error; - -pub use database::{ - get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, - get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, - WatchAttestation, WatchSuboptimalAttestation, -}; - -pub use server::{attestation_routes, blockprint_attestation_routes}; - -use eth2::BeaconNodeHttpClient; -use types::Epoch; - -/// Sends a request to `lighthouse/analysis/attestation_performance`. -/// Formats the response into a vector of `WatchSuboptimalAttestation`. -/// -/// Any attestations with `source == true && head == true && target == true` are ignored. -pub async fn get_attestation_performances( - bn: &BeaconNodeHttpClient, - start_epoch: Epoch, - end_epoch: Epoch, - slots_per_epoch: u64, -) -> Result, Error> { - let mut output = Vec::new(); - let result = bn - .get_lighthouse_analysis_attestation_performance( - start_epoch, - end_epoch, - "global".to_string(), - ) - .await?; - for index in result { - for epoch in index.epochs { - if epoch.1.active { - // Check if the attestation is suboptimal. - if !epoch.1.source || !epoch.1.head || !epoch.1.target { - output.push(WatchSuboptimalAttestation { - epoch_start_slot: WatchSlot::from_slot( - Epoch::new(epoch.0).start_slot(slots_per_epoch), - ), - index: index.index as i32, - source: epoch.1.source, - head: epoch.1.head, - target: epoch.1.target, - }) - } - } - } - } - Ok(output) -} diff --git a/watch/src/suboptimal_attestations/server.rs b/watch/src/suboptimal_attestations/server.rs deleted file mode 100644 index 391db9a41b5..00000000000 --- a/watch/src/suboptimal_attestations/server.rs +++ /dev/null @@ -1,299 +0,0 @@ -use crate::database::{ - get_canonical_slot, get_connection, get_validator_by_index, get_validator_by_public_key, - get_validators_clients_at_slot, get_validators_latest_proposer_info, PgPool, WatchPK, - WatchSlot, -}; - -use crate::blockprint::database::construct_validator_blockprints_at_slot; -use crate::server::Error; -use crate::suboptimal_attestations::database::{ - get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, - get_validators_missed_head, get_validators_missed_source, get_validators_missed_target, - WatchAttestation, WatchSuboptimalAttestation, -}; - -use axum::{extract::Path, routing::get, Extension, Json, Router}; -use std::collections::{HashMap, HashSet}; -use std::str::FromStr; -use types::Epoch; - -// Will return Ok(None) if the epoch is not synced or if the validator does not exist. -// In the future it might be worth differentiating these events. -pub async fn get_validator_attestation( - Path((validator_query, epoch_query)): Path<(String, u64)>, - Extension(pool): Extension, - Extension(slots_per_epoch): Extension, -) -> Result>, Error> { - let mut conn = get_connection(&pool).map_err(Error::Database)?; - let epoch = Epoch::new(epoch_query); - - // Ensure the database has synced the target epoch. - if get_canonical_slot( - &mut conn, - WatchSlot::from_slot(epoch.end_slot(slots_per_epoch)), - )? - .is_none() - { - // Epoch is not fully synced. 
- return Ok(Json(None)); - } - - let index = if validator_query.starts_with("0x") { - let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; - get_validator_by_public_key(&mut conn, pubkey)? - .ok_or(Error::NotFound)? - .index - } else { - i32::from_str(&validator_query).map_err(|_| Error::BadRequest)? - }; - let attestation = if let Some(suboptimal_attestation) = - get_attestation_by_index(&mut conn, index, epoch, slots_per_epoch)? - { - Some(suboptimal_attestation.to_attestation(slots_per_epoch)) - } else { - // Attestation was not in database. Check if the validator was active. - match get_validator_by_index(&mut conn, index)? { - Some(validator) => { - if let Some(activation_epoch) = validator.activation_epoch { - if activation_epoch <= epoch.as_u64() as i32 { - if let Some(exit_epoch) = validator.exit_epoch { - if exit_epoch > epoch.as_u64() as i32 { - // Validator is active and has not yet exited. - Some(WatchAttestation::optimal(index, epoch)) - } else { - // Validator has exited. - None - } - } else { - // Validator is active and has not yet exited. - Some(WatchAttestation::optimal(index, epoch)) - } - } else { - // Validator is not yet active. - None - } - } else { - // Validator is not yet active. - None - } - } - None => return Err(Error::Other("Validator index does not exist".to_string())), - } - }; - Ok(Json(attestation)) -} - -pub async fn get_all_validators_attestations( - Path(epoch): Path, - Extension(pool): Extension, - Extension(slots_per_epoch): Extension, -) -> Result>, Error> { - let mut conn = get_connection(&pool).map_err(Error::Database)?; - - let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); - - Ok(Json(get_all_suboptimal_attestations_for_epoch( - &mut conn, - epoch_start_slot, - )?)) -} - -pub async fn get_validators_missed_vote( - Path((vote, epoch)): Path<(String, u64)>, - Extension(pool): Extension, - Extension(slots_per_epoch): Extension, -) -> Result>, Error> { - let mut conn = get_connection(&pool).map_err(Error::Database)?; - - let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); - match vote.to_lowercase().as_str() { - "source" => Ok(Json(get_validators_missed_source( - &mut conn, - epoch_start_slot, - )?)), - "head" => Ok(Json(get_validators_missed_head( - &mut conn, - epoch_start_slot, - )?)), - "target" => Ok(Json(get_validators_missed_target( - &mut conn, - epoch_start_slot, - )?)), - _ => Err(Error::BadRequest), - } -} - -pub async fn get_validators_missed_vote_graffiti( - Path((vote, epoch)): Path<(String, u64)>, - Extension(pool): Extension, - Extension(slots_per_epoch): Extension, -) -> Result>, Error> { - let mut conn = get_connection(&pool).map_err(Error::Database)?; - - let Json(indices) = get_validators_missed_vote( - Path((vote, epoch)), - Extension(pool), - Extension(slots_per_epoch), - ) - .await?; - - let graffitis = get_validators_latest_proposer_info(&mut conn, indices)? - .values() - .map(|info| info.graffiti.clone()) - .collect::>(); - - let mut result = HashMap::new(); - for graffiti in graffitis { - if !result.contains_key(&graffiti) { - result.insert(graffiti.clone(), 0); - } - *result - .get_mut(&graffiti) - .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? 
+= 1; - } - - Ok(Json(result)) -} - -pub fn attestation_routes() -> Router { - Router::new() - .route( - "/v1/validators/:validator/attestation/:epoch", - get(get_validator_attestation), - ) - .route( - "/v1/validators/all/attestation/:epoch", - get(get_all_validators_attestations), - ) - .route( - "/v1/validators/missed/:vote/:epoch", - get(get_validators_missed_vote), - ) - .route( - "/v1/validators/missed/:vote/:epoch/graffiti", - get(get_validators_missed_vote_graffiti), - ) -} - -/// The functions below are dependent on Blockprint and if it is disabled, the endpoints will be -/// disabled. -pub async fn get_clients_missed_vote( - Path((vote, epoch)): Path<(String, u64)>, - Extension(pool): Extension, - Extension(slots_per_epoch): Extension, -) -> Result>, Error> { - let mut conn = get_connection(&pool).map_err(Error::Database)?; - - let Json(indices) = get_validators_missed_vote( - Path((vote, epoch)), - Extension(pool), - Extension(slots_per_epoch), - ) - .await?; - - // All validators which missed the vote. - let indices_map = indices.into_iter().collect::>(); - - let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); - - // All validators. - let client_map = - construct_validator_blockprints_at_slot(&mut conn, target_slot, slots_per_epoch)?; - - let mut result = HashMap::new(); - - for index in indices_map { - if let Some(print) = client_map.get(&index) { - if !result.contains_key(print) { - result.insert(print.clone(), 0); - } - *result - .get_mut(print) - .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? += 1; - } - } - - Ok(Json(result)) -} - -pub async fn get_clients_missed_vote_percentages( - Path((vote, epoch)): Path<(String, u64)>, - Extension(pool): Extension, - Extension(slots_per_epoch): Extension, -) -> Result>, Error> { - let Json(clients_counts) = get_clients_missed_vote( - Path((vote, epoch)), - Extension(pool.clone()), - Extension(slots_per_epoch), - ) - .await?; - - let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); - - let mut conn = get_connection(&pool)?; - let totals = get_validators_clients_at_slot(&mut conn, target_slot, slots_per_epoch)?; - - let mut result = HashMap::new(); - for (client, count) in clients_counts.iter() { - let client_total: f64 = *totals - .get(client) - .ok_or_else(|| Error::Other("Client type mismatch".to_string()))? - as f64; - // `client_total` should never be `0`, but if it is, return `0` instead of `inf`. - if client_total == 0.0 { - result.insert(client.to_string(), 0.0); - } else { - let percentage: f64 = *count as f64 / client_total * 100.0; - result.insert(client.to_string(), percentage); - } - } - - Ok(Json(result)) -} - -pub async fn get_clients_missed_vote_percentages_relative( - Path((vote, epoch)): Path<(String, u64)>, - Extension(pool): Extension, - Extension(slots_per_epoch): Extension, -) -> Result>, Error> { - let Json(clients_counts) = get_clients_missed_vote( - Path((vote, epoch)), - Extension(pool), - Extension(slots_per_epoch), - ) - .await?; - - let mut total: u64 = 0; - for (_, count) in clients_counts.iter() { - total += *count - } - - let mut result = HashMap::new(); - for (client, count) in clients_counts.iter() { - // `total` should never be 0, but if it is, return `-` instead of `inf`. 
- if total == 0 { - result.insert(client.to_string(), 0.0); - } else { - let percentage: f64 = *count as f64 / total as f64 * 100.0; - result.insert(client.to_string(), percentage); - } - } - - Ok(Json(result)) -} - -pub fn blockprint_attestation_routes() -> Router { - Router::new() - .route( - "/v1/clients/missed/:vote/:epoch", - get(get_clients_missed_vote), - ) - .route( - "/v1/clients/missed/:vote/:epoch/percentages", - get(get_clients_missed_vote_percentages), - ) - .route( - "/v1/clients/missed/:vote/:epoch/percentages/relative", - get(get_clients_missed_vote_percentages_relative), - ) -} diff --git a/watch/src/suboptimal_attestations/updater.rs b/watch/src/suboptimal_attestations/updater.rs deleted file mode 100644 index d8f6ec57d5a..00000000000 --- a/watch/src/suboptimal_attestations/updater.rs +++ /dev/null @@ -1,236 +0,0 @@ -use crate::database::{self, Error as DbError}; -use crate::updater::{Error, UpdateHandler}; - -use crate::suboptimal_attestations::get_attestation_performances; - -use eth2::types::EthSpec; -use log::{debug, error, warn}; - -const MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS: u64 = 50; - -impl UpdateHandler { - /// Forward fills the `suboptimal_attestations` table starting from the entry with the highest - /// slot. - /// - /// It construts a request to the `attestation_performance` API endpoint with: - /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest canonical slot) - /// `end_epoch` -> epoch of highest canonical slot - /// - /// It will resync the latest epoch if it is not fully filled but will not overwrite existing - /// values unless there is a re-org. - /// That is, `if highest_filled_slot % slots_per_epoch != 31`. - /// - /// In the event the most recent epoch has no suboptimal attestations, it will attempt to - /// resync that epoch. The odds of this occuring on mainnet are vanishingly small so it is not - /// accounted for. - /// - /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. - pub async fn fill_suboptimal_attestations(&mut self) -> Result<(), Error> { - let mut conn = database::get_connection(&self.pool)?; - - let highest_filled_slot_opt = if self.config.attestations { - database::get_highest_attestation(&mut conn)? - .map(|attestation| attestation.epoch_start_slot.as_slot()) - } else { - return Err(Error::NotEnabled("attestations".to_string())); - }; - - let start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt { - if highest_filled_slot % self.slots_per_epoch == self.slots_per_epoch.saturating_sub(1) - { - // The whole epoch is filled so we can begin syncing the next one. - highest_filled_slot.epoch(self.slots_per_epoch) + 1 - } else { - // The epoch is only partially synced. Try to sync it fully. - highest_filled_slot.epoch(self.slots_per_epoch) - } - } else { - // No rows present in the `suboptimal_attestations` table. Use `canonical_slots` - // instead. - if let Some(lowest_canonical_slot) = database::get_lowest_canonical_slot(&mut conn)? { - lowest_canonical_slot - .slot - .as_slot() - .epoch(self.slots_per_epoch) - } else { - // There are no slots in the database, do not fill the `suboptimal_attestations` - // table. 
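The doc comment above caps each forward-fill request at `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS` epochs; the clamp itself appears a little further down. A small sketch of that bound, using bare `u64` epochs:

```rust
/// Cap a forward-fill request at `max_epochs` epochs, as the updater does with
/// `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`.
fn cap_end_epoch(start_epoch: u64, end_epoch: u64, max_epochs: u64) -> u64 {
    if start_epoch < end_epoch.saturating_sub(max_epochs) {
        start_epoch + max_epochs
    } else {
        end_epoch
    }
}

fn main() {
    // A 90-epoch gap is trimmed to 50 epochs; a 10-epoch gap is left alone.
    assert_eq!(cap_end_epoch(10, 100, 50), 60);
    assert_eq!(cap_end_epoch(90, 100, 50), 100);
}
```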
- warn!("Refusing to fill the `suboptimal_attestations` table as there are no slots in the database"); - return Ok(()); - } - }; - - if let Some(highest_canonical_slot) = - database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) - { - let mut end_epoch = highest_canonical_slot.epoch(self.slots_per_epoch); - - // The `lighthouse/analysis/attestation_performance` endpoint can only retrieve attestations - // which are more than 1 epoch old. - // We assume that `highest_canonical_slot` is near the head of the chain. - end_epoch = end_epoch.saturating_sub(2_u64); - - // If end_epoch == 0 then the chain just started so we need to wait until - // `current_epoch >= 2`. - if end_epoch == 0 { - debug!("Chain just begun, refusing to sync attestations"); - return Ok(()); - } - - if start_epoch > end_epoch { - debug!("Attestations are up to date with the head of the database"); - return Ok(()); - } - - // Ensure the size of the request does not exceed the maximum allowed value. - if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { - end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS - } - - if let Some(lowest_canonical_slot) = - database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) - { - let mut attestations = get_attestation_performances( - &self.bn, - start_epoch, - end_epoch, - self.slots_per_epoch, - ) - .await?; - - // Only insert attestations with corresponding `canonical_slot`s. - attestations.retain(|attestation| { - attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot - && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot - }); - database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; - } else { - return Err(Error::Database(DbError::Other( - "Database did not return a lowest canonical slot when one exists".to_string(), - ))); - } - } else { - // There are no slots in the `canonical_slots` table, but there are entries in the - // `suboptimal_attestations` table. This is a critical failure. It usually means - // someone has manually tampered with the database tables and should not occur during - // normal operation. - error!("Database is corrupted. Please re-sync the database"); - return Err(Error::Database(DbError::DatabaseCorrupted)); - } - - Ok(()) - } - - /// Backfill the `suboptimal_attestations` table starting from the entry with the lowest slot. - /// - /// It constructs a request to the `attestation_performance` API endpoint with: - /// `start_epoch` -> epoch of the lowest `canonical_slot`. - /// `end_epoch` -> epoch of the lowest filled `suboptimal_attestation` - 1 (or epoch of highest - /// canonical slot) - /// - /// It will resync the lowest epoch if it is not fully filled. - /// That is, `if lowest_filled_slot % slots_per_epoch != 0` - /// - /// In the event there are no suboptimal attestations present in the lowest epoch, it will attempt to - /// resync the epoch. The odds of this occuring on mainnet are vanishingly small so it is not - /// accounted for. - /// - /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. - pub async fn backfill_suboptimal_attestations(&mut self) -> Result<(), Error> { - let mut conn = database::get_connection(&self.pool)?; - let max_attestation_backfill = self.config.max_backfill_size_epochs; - - // Get the slot of the lowest entry in the `suboptimal_attestations` table. - let lowest_filled_slot_opt = if self.config.attestations { - database::get_lowest_attestation(&mut conn)? 
- .map(|attestation| attestation.epoch_start_slot.as_slot()) - } else { - return Err(Error::NotEnabled("attestations".to_string())); - }; - - let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { - if lowest_filled_slot % self.slots_per_epoch == 0 { - lowest_filled_slot - .epoch(self.slots_per_epoch) - .saturating_sub(1_u64) - } else { - // The epoch is only partially synced. Try to sync it fully. - lowest_filled_slot.epoch(self.slots_per_epoch) - } - } else { - // No entries in the `suboptimal_attestations` table. Use `canonical_slots` instead. - if let Some(highest_canonical_slot) = - database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) - { - // Subtract 2 since `end_epoch` must be less than the current epoch - 1. - // We assume that `highest_canonical_slot` is near the head of the chain. - highest_canonical_slot - .epoch(self.slots_per_epoch) - .saturating_sub(2_u64) - } else { - // There are no slots in the database, do not backfill the - // `suboptimal_attestations` table. - warn!("Refusing to backfill attestations as there are no slots in the database"); - return Ok(()); - } - }; - - if end_epoch == 0 { - debug!("Attestations backfill is complete"); - return Ok(()); - } - - if let Some(lowest_canonical_slot) = - database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) - { - let mut start_epoch = lowest_canonical_slot.epoch(self.slots_per_epoch); - - if start_epoch > end_epoch { - debug!("Attestations are up to date with the base of the database"); - return Ok(()); - } - - // Ensure the request range does not exceed `max_attestation_backfill` or - // `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. - if start_epoch < end_epoch.saturating_sub(max_attestation_backfill) { - start_epoch = end_epoch.saturating_sub(max_attestation_backfill) - } - if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { - start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) - } - - if let Some(highest_canonical_slot) = - database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) - { - let mut attestations = get_attestation_performances( - &self.bn, - start_epoch, - end_epoch, - self.slots_per_epoch, - ) - .await?; - - // Only insert `suboptimal_attestations` with corresponding `canonical_slots`. - attestations.retain(|attestation| { - attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot - && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot - }); - - database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; - } else { - return Err(Error::Database(DbError::Other( - "Database did not return a lowest slot when one exists".to_string(), - ))); - } - } else { - // There are no slots in the `canonical_slot` table, but there are entries in the - // `suboptimal_attestations` table. This is a critical failure. It usually means - // someone has manually tampered with the database tables and should not occur during - // normal operation. - error!("Database is corrupted. 
Please re-sync the database"); - return Err(Error::Database(DbError::DatabaseCorrupted)); - } - - Ok(()) - } -} diff --git a/watch/src/updater/config.rs b/watch/src/updater/config.rs deleted file mode 100644 index 0179be73db6..00000000000 --- a/watch/src/updater/config.rs +++ /dev/null @@ -1,65 +0,0 @@ -use serde::{Deserialize, Serialize}; - -pub const BEACON_NODE_URL: &str = "http://127.0.0.1:5052"; - -pub const fn max_backfill_size_epochs() -> u64 { - 2 -} -pub const fn backfill_stop_epoch() -> u64 { - 0 -} -pub const fn attestations() -> bool { - true -} -pub const fn proposer_info() -> bool { - true -} -pub const fn block_rewards() -> bool { - true -} -pub const fn block_packing() -> bool { - true -} - -fn beacon_node_url() -> String { - BEACON_NODE_URL.to_string() -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - /// The URL of the beacon you wish to sync from. - #[serde(default = "beacon_node_url")] - pub beacon_node_url: String, - /// The maximum size each backfill iteration will allow per request (in epochs). - #[serde(default = "max_backfill_size_epochs")] - pub max_backfill_size_epochs: u64, - /// The epoch at which to never backfill past. - #[serde(default = "backfill_stop_epoch")] - pub backfill_stop_epoch: u64, - /// Whether to sync the suboptimal_attestations table. - #[serde(default = "attestations")] - pub attestations: bool, - /// Whether to sync the proposer_info table. - #[serde(default = "proposer_info")] - pub proposer_info: bool, - /// Whether to sync the block_rewards table. - #[serde(default = "block_rewards")] - pub block_rewards: bool, - /// Whether to sync the block_packing table. - #[serde(default = "block_packing")] - pub block_packing: bool, -} - -impl Default for Config { - fn default() -> Self { - Self { - beacon_node_url: beacon_node_url(), - max_backfill_size_epochs: max_backfill_size_epochs(), - backfill_stop_epoch: backfill_stop_epoch(), - attestations: attestations(), - proposer_info: proposer_info(), - block_rewards: block_rewards(), - block_packing: block_packing(), - } - } -} diff --git a/watch/src/updater/error.rs b/watch/src/updater/error.rs deleted file mode 100644 index 13c83bcf010..00000000000 --- a/watch/src/updater/error.rs +++ /dev/null @@ -1,57 +0,0 @@ -use crate::blockprint::Error as BlockprintError; -use crate::database::Error as DbError; -use beacon_node::beacon_chain::BeaconChainError; -use eth2::{Error as Eth2Error, SensitiveError}; -use std::fmt; - -#[derive(Debug)] -#[allow(dead_code)] -pub enum Error { - BeaconChain(BeaconChainError), - Eth2(Eth2Error), - SensitiveUrl(SensitiveError), - Database(DbError), - Blockprint(BlockprintError), - UnableToGetRemoteHead, - BeaconNodeSyncing, - NotEnabled(String), - NoValidatorsFound, - BeaconNodeNotCompatible(String), - InvalidConfig(String), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl From for Error { - fn from(e: BeaconChainError) -> Self { - Error::BeaconChain(e) - } -} - -impl From for Error { - fn from(e: Eth2Error) -> Self { - Error::Eth2(e) - } -} - -impl From for Error { - fn from(e: SensitiveError) -> Self { - Error::SensitiveUrl(e) - } -} - -impl From for Error { - fn from(e: DbError) -> Self { - Error::Database(e) - } -} - -impl From for Error { - fn from(e: BlockprintError) -> Self { - Error::Blockprint(e) - } -} diff --git a/watch/src/updater/handler.rs b/watch/src/updater/handler.rs deleted file mode 100644 index 8f5e3f8e4a3..00000000000 --- 
a/watch/src/updater/handler.rs +++ /dev/null @@ -1,471 +0,0 @@ -use crate::blockprint::WatchBlockprintClient; -use crate::config::Config as FullConfig; -use crate::database::{self, PgPool, WatchCanonicalSlot, WatchHash, WatchSlot}; -use crate::updater::{Config, Error, WatchSpec}; -use beacon_node::beacon_chain::BeaconChainError; -use eth2::{ - types::{BlockId, SyncingData}, - BeaconNodeHttpClient, SensitiveUrl, -}; -use log::{debug, error, info, warn}; -use std::collections::HashSet; -use std::marker::PhantomData; -use types::{BeaconBlockHeader, EthSpec, Hash256, SignedBeaconBlock, Slot}; - -use crate::updater::{get_beacon_block, get_header, get_validators}; - -const MAX_EXPECTED_REORG_LENGTH: u64 = 32; - -/// Ensure the existing database is valid for this run. -pub async fn ensure_valid_database( - spec: &WatchSpec, - pool: &mut PgPool, -) -> Result<(), Error> { - let mut conn = database::get_connection(pool)?; - - let bn_slots_per_epoch = spec.slots_per_epoch(); - let bn_config_name = spec.network.clone(); - - if let Some((db_config_name, db_slots_per_epoch)) = database::get_active_config(&mut conn)? { - if db_config_name != bn_config_name || db_slots_per_epoch != bn_slots_per_epoch as i32 { - Err(Error::InvalidConfig( - "The config stored in the database does not match the beacon node.".to_string(), - )) - } else { - // Configs match. - Ok(()) - } - } else { - // No config exists in the DB. - database::insert_active_config(&mut conn, bn_config_name, bn_slots_per_epoch)?; - Ok(()) - } -} - -pub struct UpdateHandler { - pub pool: PgPool, - pub bn: BeaconNodeHttpClient, - pub blockprint: Option, - pub config: Config, - pub slots_per_epoch: u64, - pub _phantom: PhantomData, -} - -impl UpdateHandler { - pub async fn new( - bn: BeaconNodeHttpClient, - spec: WatchSpec, - config: FullConfig, - ) -> Result, Error> { - let blockprint = if config.blockprint.enabled { - if let Some(server) = config.blockprint.url { - let blockprint_url = SensitiveUrl::parse(&server).map_err(Error::SensitiveUrl)?; - Some(WatchBlockprintClient { - client: reqwest::Client::new(), - server: blockprint_url, - username: config.blockprint.username, - password: config.blockprint.password, - }) - } else { - return Err(Error::NotEnabled( - "blockprint was enabled but url was not set".to_string(), - )); - } - } else { - None - }; - - let mut pool = database::build_connection_pool(&config.database)?; - - ensure_valid_database(&spec, &mut pool).await?; - - Ok(Self { - pool, - bn, - blockprint, - config: config.updater, - slots_per_epoch: spec.slots_per_epoch(), - _phantom: PhantomData, - }) - } - - /// Gets the syncing status of the connected beacon node. - pub async fn get_bn_syncing_status(&mut self) -> Result { - Ok(self.bn.get_node_syncing().await?.data) - } - - /// Gets a list of block roots from the database which do not yet contain a corresponding - /// entry in the `beacon_blocks` table and inserts them. - pub async fn update_unknown_blocks(&mut self) -> Result<(), Error> { - let mut conn = database::get_connection(&self.pool)?; - let roots = database::get_unknown_canonical_blocks(&mut conn)?; - for root in roots { - let block_opt: Option> = - get_beacon_block(&self.bn, BlockId::Root(root.as_hash())).await?; - if let Some(block) = block_opt { - database::insert_beacon_block(&mut conn, block, root)?; - } - } - - Ok(()) - } - - /// Performs a head update with the following steps: - /// 1. Pull the latest header from the beacon node and the latest canonical slot from the - /// database. - /// 2. 
Loop back through the beacon node and database to find the first matching slot -> root - /// pair. - /// 3. Go back `MAX_EXPECTED_REORG_LENGTH` slots through the database ensuring it is - /// consistent with the beacon node. If a re-org occurs beyond this range, we cannot recover. - /// 4. Remove any invalid slots from the database. - /// 5. Sync all blocks between the first valid block of the database and the head of the beacon - /// chain. - /// - /// In the event there are no slots present in the database, it will sync from the head block - /// block back to the first slot of the epoch. - /// This will ensure backfills are always done in full epochs (which helps keep certain syncing - /// tasks efficient). - pub async fn perform_head_update(&mut self) -> Result<(), Error> { - let mut conn = database::get_connection(&self.pool)?; - // Load the head from the beacon node. - let bn_header = get_header(&self.bn, BlockId::Head) - .await? - .ok_or(Error::UnableToGetRemoteHead)?; - let header_root = bn_header.canonical_root(); - - if let Some(latest_matching_canonical_slot) = - self.get_first_matching_block(bn_header.clone()).await? - { - // Check for reorgs. - let latest_db_slot = self.check_for_reorg(latest_matching_canonical_slot).await?; - - // Remove all slots above `latest_db_slot` from the database. - let result = database::delete_canonical_slots_above( - &mut conn, - WatchSlot::from_slot(latest_db_slot), - )?; - info!("{result} old records removed during head update"); - - if result > 0 { - // If slots were removed, we need to resync the suboptimal_attestations table for - // the epoch since they will have changed and cannot be fixed by a simple update. - let epoch = latest_db_slot - .epoch(self.slots_per_epoch) - .saturating_sub(1_u64); - debug!("Preparing to resync attestations above epoch {epoch}"); - database::delete_suboptimal_attestations_above( - &mut conn, - WatchSlot::from_slot(epoch.start_slot(self.slots_per_epoch)), - )?; - } - - // Since we are syncing backwards, `start_slot > `end_slot`. - let start_slot = bn_header.slot; - let end_slot = latest_db_slot + 1; - self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot) - .await?; - info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}"); - - // Attempt to sync new blocks with blockprint. - //self.sync_blockprint_until(start_slot).await?; - } else { - // There are no matching parent blocks. Sync from the head block back until the first - // block of the epoch. - let start_slot = bn_header.slot; - let end_slot = start_slot.saturating_sub(start_slot % self.slots_per_epoch); - self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot) - .await?; - info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}"); - } - - Ok(()) - } - - /// Attempt to find a row in the `canonical_slots` table which matches the `canonical_root` of - /// the block header as reported by the beacon node. - /// - /// Any blocks above this value are not canonical according to the beacon node. - /// - /// Note: In the event that there are skip slots above the slot returned by the function, - /// they will not be returned, so may be pruned or re-synced by other code despite being - /// canonical. - pub async fn get_first_matching_block( - &mut self, - mut bn_header: BeaconBlockHeader, - ) -> Result, Error> { - let mut conn = database::get_connection(&self.pool)?; - - // Load latest non-skipped canonical slot from database. 
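// A minimal, standalone sketch (plain u64s in place of the `Slot`/`Epoch`
// wrappers used above) of the epoch arithmetic behind `perform_head_update`:
// when the database is empty, the initial reverse sync stops at the first
// slot of the head's epoch, which is what lets later backfills always work in
// whole epochs.
fn epoch_of(slot: u64, slots_per_epoch: u64) -> u64 {
    slot / slots_per_epoch
}

fn epoch_start_slot(epoch: u64, slots_per_epoch: u64) -> u64 {
    epoch * slots_per_epoch
}

// Equivalent to `start_slot - start_slot % slots_per_epoch` in the handler.
fn head_update_end_slot(head_slot: u64, slots_per_epoch: u64) -> u64 {
    epoch_start_slot(epoch_of(head_slot, slots_per_epoch), slots_per_epoch)
}

fn main() {
    let slots_per_epoch = 32;
    // A head at slot 40 reverse-syncs to slot 32; a head inside the first
    // epoch reverse-syncs all the way to genesis, matching the lowest-slot
    // assertions in the tests further below.
    assert_eq!(head_update_end_slot(40, slots_per_epoch), 32);
    assert_eq!(head_update_end_slot(4, slots_per_epoch), 0);
}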
- if let Some(db_canonical_slot) = - database::get_highest_non_skipped_canonical_slot(&mut conn)? - { - // Check if the header or parent root matches the entry in the database. - if bn_header.parent_root == db_canonical_slot.root.as_hash() - || bn_header.canonical_root() == db_canonical_slot.root.as_hash() - { - Ok(Some(db_canonical_slot)) - } else { - // Header is not the child of the highest entry in the database. - // From here we need to iterate backwards through the database until we find - // a slot -> root pair that matches the beacon node. - loop { - // Store working `parent_root`. - let parent_root = bn_header.parent_root; - - // Try the next header. - let next_header = get_header(&self.bn, BlockId::Root(parent_root)).await?; - if let Some(header) = next_header { - bn_header = header.clone(); - if let Some(db_canonical_slot) = database::get_canonical_slot_by_root( - &mut conn, - WatchHash::from_hash(header.parent_root), - )? { - // Check if the entry in the database matches the parent of - // the header. - if header.parent_root == db_canonical_slot.root.as_hash() { - return Ok(Some(db_canonical_slot)); - } else { - // Move on to the next header. - continue; - } - } else { - // Database does not have the referenced root. Try the next header. - continue; - } - } else { - // If we get this error it means that the `parent_root` of the header - // did not reference a canonical block. - return Err(Error::BeaconChain(BeaconChainError::MissingBeaconBlock( - parent_root, - ))); - } - } - } - } else { - // There are no non-skipped blocks present in the database. - Ok(None) - } - } - - /// Given the latest slot in the database which matches a root in the beacon node, - /// traverse back through the database for `MAX_EXPECTED_REORG_LENGTH` slots to ensure the tip - /// of the database is consistent with the beacon node (in the case that reorgs have occured). - /// - /// Returns the slot before the oldest canonical_slot which has an invalid child. - pub async fn check_for_reorg( - &mut self, - latest_canonical_slot: WatchCanonicalSlot, - ) -> Result { - let mut conn = database::get_connection(&self.pool)?; - - let end_slot = latest_canonical_slot.slot.as_u64(); - let start_slot = end_slot.saturating_sub(MAX_EXPECTED_REORG_LENGTH); - - for i in start_slot..end_slot { - let slot = Slot::new(i); - let db_canonical_slot_opt = - database::get_canonical_slot(&mut conn, WatchSlot::from_slot(slot))?; - if let Some(db_canonical_slot) = db_canonical_slot_opt { - let header_opt = get_header(&self.bn, BlockId::Slot(slot)).await?; - if let Some(header) = header_opt { - if header.canonical_root() == db_canonical_slot.root.as_hash() { - // The roots match (or are both skip slots). - continue; - } else { - // The block roots do not match. We need to re-sync from here. - warn!("Block {slot} does not match the beacon node. Resyncing"); - return Ok(slot.saturating_sub(1_u64)); - } - } else if !db_canonical_slot.skipped { - // The block exists in the database, but does not exist on the beacon node. - // We need to re-sync from here. - warn!("Block {slot} does not exist on the beacon node. Resyncing"); - return Ok(slot.saturating_sub(1_u64)); - } - } else { - // This slot does not exist in the database. - let lowest_slot = database::get_lowest_canonical_slot(&mut conn)? - .map(|canonical_slot| canonical_slot.slot.as_slot()); - if lowest_slot > Some(slot) { - // The database has not back-filled this slot yet, so skip it. 
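// A simplified, standalone model of the reorg check implemented by
// `check_for_reorg` here: walk a bounded window of slots up to the database
// head, compare database roots against beacon-node roots, and return the slot
// just before the first divergence (or the head itself if the window is
// consistent). Plain `u64` roots and `HashMap` lookups stand in for the real
// `WatchHash` values and the database/HTTP queries, and the skipped-slot and
// not-yet-backfilled cases handled above are ignored.
use std::collections::HashMap;

const MAX_EXPECTED_REORG_LENGTH: u64 = 32;

fn find_reorg_base(
    db_roots: &HashMap<u64, u64>,
    bn_roots: &HashMap<u64, u64>,
    db_head_slot: u64,
) -> u64 {
    let start = db_head_slot.saturating_sub(MAX_EXPECTED_REORG_LENGTH);
    for slot in start..db_head_slot {
        match (db_roots.get(&slot), bn_roots.get(&slot)) {
            // The roots disagree, or the block vanished from the node:
            // resync from the parent of this slot.
            (Some(db), Some(bn)) if db != bn => return slot.saturating_sub(1),
            (Some(_), None) => return slot.saturating_sub(1),
            _ => {}
        }
    }
    db_head_slot
}

fn main() {
    let db: HashMap<u64, u64> = (0..=10).map(|s| (s, s)).collect();
    let mut bn = db.clone();
    // A one-block reorg at slot 9 means everything from slot 9 upwards must
    // be deleted and resynced.
    bn.insert(9, 999);
    assert_eq!(find_reorg_base(&db, &bn, 10), 8);
}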
- continue; - } else { - // The database does not contain this block, but has back-filled past it. - // We need to resync from here. - warn!("Slot {slot} missing from database. Resyncing"); - return Ok(slot.saturating_sub(1_u64)); - } - } - } - - // The database is consistent with the beacon node, so return the head of the database. - Ok(latest_canonical_slot.slot.as_slot()) - } - - /// Fills the canonical slots table beginning from `start_slot` and ending at `end_slot`. - /// It fills in reverse order, that is, `start_slot` is higher than `end_slot`. - /// - /// Skip slots set `root` to the root of the previous non-skipped slot and also sets - /// `skipped == true`. - /// - /// Since it uses `insert_canonical_slot` to interact with the database, it WILL NOT overwrite - /// existing rows. This means that any part of the chain within `end_slot..=start_slot` that - /// needs to be resynced, must first be deleted from the database. - pub async fn reverse_fill_canonical_slots( - &mut self, - mut header: BeaconBlockHeader, - mut header_root: Hash256, - mut skipped: bool, - start_slot: Slot, - end_slot: Slot, - ) -> Result { - let mut count = 0; - - let mut conn = database::get_connection(&self.pool)?; - - // Iterate, descending from `start_slot` (higher) to `end_slot` (lower). - for slot in (end_slot.as_u64()..=start_slot.as_u64()).rev() { - // Insert header. - database::insert_canonical_slot( - &mut conn, - WatchCanonicalSlot { - slot: WatchSlot::new(slot), - root: WatchHash::from_hash(header_root), - skipped, - beacon_block: None, - }, - )?; - count += 1; - - // Load the next header: - // We must use BlockId::Slot since we want to include skip slots. - header = if let Some(new_header) = get_header( - &self.bn, - BlockId::Slot(Slot::new(slot.saturating_sub(1_u64))), - ) - .await? - { - header_root = new_header.canonical_root(); - skipped = false; - new_header - } else { - if header.slot == 0 { - info!("Reverse fill exhausted at slot 0"); - break; - } - // Slot was skipped, so use the parent_root (most recent non-skipped block). - skipped = true; - header_root = header.parent_root; - header - }; - } - - Ok(count) - } - - /// Backfills the `canonical_slots` table starting from the lowest non-skipped slot and - /// stopping after `max_backfill_size_epochs` epochs. - pub async fn backfill_canonical_slots(&mut self) -> Result<(), Error> { - let mut conn = database::get_connection(&self.pool)?; - let backfill_stop_slot = self.config.backfill_stop_epoch * self.slots_per_epoch; - // Check to see if we have finished backfilling. - if let Some(lowest_slot) = database::get_lowest_canonical_slot(&mut conn)? { - if lowest_slot.slot.as_slot() == backfill_stop_slot { - debug!("Backfill sync complete, all slots filled"); - return Ok(()); - } - } - - let backfill_slot_count = self.config.max_backfill_size_epochs * self.slots_per_epoch; - - if let Some(lowest_non_skipped_canonical_slot) = - database::get_lowest_non_skipped_canonical_slot(&mut conn)? - { - // Set `start_slot` equal to the lowest non-skipped slot in the database. - // While this will attempt to resync some parts of the bottom of the chain, it reduces - // complexity when dealing with skip slots. 
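// A small sketch of how each backfill iteration below picks its window: step
// back at most `max_backfill_size_epochs` worth of slots from the lowest slot
// already in the database, but never past the configured stop slot. Plain
// u64s stand in for the `Slot` type, and a single "lowest slot" stands in for
// the lowest / lowest-non-skipped distinction made in the real code.
fn backfill_window(
    lowest_db_slot: u64,
    max_backfill_size_epochs: u64,
    backfill_stop_epoch: u64,
    slots_per_epoch: u64,
) -> Option<(u64, u64)> {
    let stop_slot = backfill_stop_epoch * slots_per_epoch;
    if lowest_db_slot == stop_slot {
        // Backfill is already complete.
        return None;
    }
    let start_slot = lowest_db_slot;
    let end_slot = start_slot
        .saturating_sub(max_backfill_size_epochs * slots_per_epoch)
        .max(stop_slot);
    Some((start_slot, end_slot))
}

fn main() {
    // With the default config (2 epochs per iteration, stop at epoch 0) and a
    // database whose lowest slot is 384, successive runs reach slot 320 and
    // then 256, matching the assertions in the `large_chain` test further
    // below.
    assert_eq!(backfill_window(384, 2, 0, 32), Some((384, 320)));
    assert_eq!(backfill_window(320, 2, 0, 32), Some((320, 256)));
    assert_eq!(backfill_window(0, 2, 0, 32), None);
}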
- let start_slot = lowest_non_skipped_canonical_slot.slot.as_slot(); - let mut end_slot = lowest_non_skipped_canonical_slot - .slot - .as_slot() - .saturating_sub(backfill_slot_count); - - // Ensure end_slot doesn't go below `backfill_stop_epoch` - if end_slot <= backfill_stop_slot { - end_slot = Slot::new(backfill_stop_slot); - } - - let header_opt = get_header(&self.bn, BlockId::Slot(start_slot)).await?; - - if let Some(header) = header_opt { - let header_root = header.canonical_root(); - let count = self - .reverse_fill_canonical_slots(header, header_root, false, start_slot, end_slot) - .await?; - - info!("Backfill completed to slot: {end_slot}, records added: {count}"); - } else { - // The lowest slot of the database is inconsistent with the beacon node. - // Currently we have no way to recover from this. The entire database will need to - // be re-synced. - error!( - "Database is inconsistent with the beacon node. \ - Please ensure your beacon node is set to the right network, \ - otherwise you may need to resync" - ); - } - } else { - // There are no blocks in the database. Forward sync needs to happen first. - info!("Backfill was not performed since there are no blocks in the database"); - return Ok(()); - }; - - Ok(()) - } - - // Attempt to update the validator set. - // This downloads the latest validator set from the beacon node, and pulls the known validator - // set from the database. - // We then take any new or updated validators and insert them into the database (overwriting - // exiting validators). - // - // In the event there are no validators in the database, it will initialize the validator set. - pub async fn update_validator_set(&mut self) -> Result<(), Error> { - let mut conn = database::get_connection(&self.pool)?; - - let current_validators = database::get_all_validators(&mut conn)?; - - if !current_validators.is_empty() { - let old_validators = HashSet::from_iter(current_validators); - - // Pull the new validator set from the beacon node. - let new_validators = get_validators(&self.bn).await?; - - // The difference should only contain validators that contain either a new `exit_epoch` (implying an - // exit) or a new `index` (implying a validator activation). - let val_diff = new_validators.difference(&old_validators); - - for diff in val_diff { - database::insert_validator(&mut conn, diff.clone())?; - } - } else { - info!("No validators present in database. Initializing the validator set"); - self.initialize_validator_set().await?; - } - - Ok(()) - } - - // Initialize the validator set by downloading it from the beacon node, inserting blockprint - // data (if required) and writing it to the database. - pub async fn initialize_validator_set(&mut self) -> Result<(), Error> { - let mut conn = database::get_connection(&self.pool)?; - - // Pull all validators from the beacon node. 
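// A self-contained sketch of the set-difference update performed by
// `update_validator_set` above: any validator whose record differs from the
// stored one (for example a newly set exit epoch) appears in `new - old` and
// is written back to the database. The struct is a simplified stand-in for
// `WatchValidator`, and `store_epoch` mirrors how the updater stores
// `FAR_FUTURE_EPOCH` as `None`.
use std::collections::HashSet;

const FAR_FUTURE_EPOCH: u64 = u64::MAX;

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct Validator {
    index: i32,
    status: String,
    exit_epoch: Option<i32>,
}

fn store_epoch(epoch: u64) -> Option<i32> {
    (epoch != FAR_FUTURE_EPOCH).then(|| epoch as i32)
}

fn main() {
    let old = HashSet::from([
        Validator { index: 0, status: "active_ongoing".into(), exit_epoch: store_epoch(FAR_FUTURE_EPOCH) },
        Validator { index: 1, status: "active_ongoing".into(), exit_epoch: store_epoch(FAR_FUTURE_EPOCH) },
    ]);
    let new = HashSet::from([
        Validator { index: 0, status: "active_ongoing".into(), exit_epoch: store_epoch(FAR_FUTURE_EPOCH) },
        // Validator 1 has initiated an exit since the last run.
        Validator { index: 1, status: "active_exiting".into(), exit_epoch: store_epoch(320) },
    ]);
    // Only the changed record needs to be re-inserted.
    let diff: Vec<&Validator> = new.difference(&old).collect();
    assert_eq!(diff.len(), 1);
    assert_eq!(diff[0].exit_epoch, Some(320));
}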
- let validators = Vec::from_iter(get_validators(&self.bn).await?); - - database::insert_batch_validators(&mut conn, validators)?; - - Ok(()) - } -} diff --git a/watch/src/updater/mod.rs b/watch/src/updater/mod.rs deleted file mode 100644 index 65e0a90a2b4..00000000000 --- a/watch/src/updater/mod.rs +++ /dev/null @@ -1,234 +0,0 @@ -use crate::config::Config as FullConfig; -use crate::database::{WatchPK, WatchValidator}; -use eth2::{ - types::{BlockId, StateId}, - BeaconNodeHttpClient, SensitiveUrl, Timeouts, -}; -use log::{debug, error, info}; -use std::collections::{HashMap, HashSet}; -use std::marker::PhantomData; -use std::time::{Duration, Instant}; -use types::{BeaconBlockHeader, EthSpec, GnosisEthSpec, MainnetEthSpec, SignedBeaconBlock}; - -pub use config::Config; -pub use error::Error; -pub use handler::UpdateHandler; - -mod config; -pub mod error; -pub mod handler; - -const FAR_FUTURE_EPOCH: u64 = u64::MAX; -const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); - -const MAINNET: &str = "mainnet"; -const GNOSIS: &str = "gnosis"; - -pub struct WatchSpec { - network: String, - spec: PhantomData, -} - -impl WatchSpec { - fn slots_per_epoch(&self) -> u64 { - E::slots_per_epoch() - } -} - -impl WatchSpec { - pub fn mainnet(network: String) -> Self { - Self { - network, - spec: PhantomData, - } - } -} - -impl WatchSpec { - fn gnosis(network: String) -> Self { - Self { - network, - spec: PhantomData, - } - } -} - -pub async fn run_updater(config: FullConfig) -> Result<(), Error> { - let beacon_node_url = - SensitiveUrl::parse(&config.updater.beacon_node_url).map_err(Error::SensitiveUrl)?; - let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT)); - - let config_map = bn.get_config_spec::>().await?.data; - - let config_name = config_map - .get("CONFIG_NAME") - .ok_or_else(|| { - Error::BeaconNodeNotCompatible("No field CONFIG_NAME on beacon node spec".to_string()) - })? - .clone(); - - match config_map - .get("PRESET_BASE") - .ok_or_else(|| { - Error::BeaconNodeNotCompatible("No field PRESET_BASE on beacon node spec".to_string()) - })? 
- .to_lowercase() - .as_str() - { - MAINNET => { - let spec = WatchSpec::mainnet(config_name); - run_once(bn, spec, config).await - } - GNOSIS => { - let spec = WatchSpec::gnosis(config_name); - run_once(bn, spec, config).await - } - _ => unimplemented!("unsupported PRESET_BASE"), - } -} - -pub async fn run_once( - bn: BeaconNodeHttpClient, - spec: WatchSpec, - config: FullConfig, -) -> Result<(), Error> { - let mut watch = UpdateHandler::new(bn, spec, config.clone()).await?; - - let sync_data = watch.get_bn_syncing_status().await?; - if sync_data.is_syncing { - error!( - "Connected beacon node is still syncing: head_slot => {:?}, distance => {}", - sync_data.head_slot, sync_data.sync_distance - ); - return Err(Error::BeaconNodeSyncing); - } - - info!("Performing head update"); - let head_timer = Instant::now(); - watch.perform_head_update().await?; - let head_timer_elapsed = head_timer.elapsed(); - debug!("Head update complete, time taken: {head_timer_elapsed:?}"); - - info!("Performing block backfill"); - let block_backfill_timer = Instant::now(); - watch.backfill_canonical_slots().await?; - let block_backfill_timer_elapsed = block_backfill_timer.elapsed(); - debug!("Block backfill complete, time taken: {block_backfill_timer_elapsed:?}"); - - info!("Updating validator set"); - let validator_timer = Instant::now(); - watch.update_validator_set().await?; - let validator_timer_elapsed = validator_timer.elapsed(); - debug!("Validator update complete, time taken: {validator_timer_elapsed:?}"); - - // Update blocks after updating the validator set since the `proposer_index` must exist in the - // `validators` table. - info!("Updating unknown blocks"); - let unknown_block_timer = Instant::now(); - watch.update_unknown_blocks().await?; - let unknown_block_timer_elapsed = unknown_block_timer.elapsed(); - debug!("Unknown block update complete, time taken: {unknown_block_timer_elapsed:?}"); - - // Run additional modules - if config.updater.attestations { - info!("Updating suboptimal attestations"); - let attestation_timer = Instant::now(); - watch.fill_suboptimal_attestations().await?; - watch.backfill_suboptimal_attestations().await?; - let attestation_timer_elapsed = attestation_timer.elapsed(); - debug!("Attestation update complete, time taken: {attestation_timer_elapsed:?}"); - } - - if config.updater.block_rewards { - info!("Updating block rewards"); - let rewards_timer = Instant::now(); - watch.fill_block_rewards().await?; - watch.backfill_block_rewards().await?; - let rewards_timer_elapsed = rewards_timer.elapsed(); - debug!("Block Rewards update complete, time taken: {rewards_timer_elapsed:?}"); - } - - if config.updater.block_packing { - info!("Updating block packing statistics"); - let packing_timer = Instant::now(); - watch.fill_block_packing().await?; - watch.backfill_block_packing().await?; - let packing_timer_elapsed = packing_timer.elapsed(); - debug!("Block packing update complete, time taken: {packing_timer_elapsed:?}"); - } - - if config.blockprint.enabled { - info!("Updating blockprint"); - let blockprint_timer = Instant::now(); - watch.fill_blockprint().await?; - watch.backfill_blockprint().await?; - let blockprint_timer_elapsed = blockprint_timer.elapsed(); - debug!("Blockprint update complete, time taken: {blockprint_timer_elapsed:?}"); - } - - Ok(()) -} - -/// Queries the beacon node for a given `BlockId` and returns the `BeaconBlockHeader` if it exists. 
-pub async fn get_header( - bn: &BeaconNodeHttpClient, - block_id: BlockId, -) -> Result, Error> { - let resp = bn - .get_beacon_headers_block_id(block_id) - .await? - .map(|resp| (resp.data.root, resp.data.header.message)); - // When quering with root == 0x000... , slot 0 will be returned with parent_root == 0x0000... - // This check escapes the loop. - if let Some((root, header)) = resp { - if root == header.parent_root { - return Ok(None); - } else { - return Ok(Some(header)); - } - } - Ok(None) -} - -pub async fn get_beacon_block( - bn: &BeaconNodeHttpClient, - block_id: BlockId, -) -> Result>, Error> { - let block = bn.get_beacon_blocks(block_id).await?.map(|resp| resp.data); - - Ok(block) -} - -/// Queries the beacon node for the current validator set. -pub async fn get_validators(bn: &BeaconNodeHttpClient) -> Result, Error> { - let mut validator_map = HashSet::new(); - - let validators = bn - .get_beacon_states_validators(StateId::Head, None, None) - .await? - .ok_or(Error::NoValidatorsFound)? - .data; - - for val in validators { - // Only store `activation_epoch` if it not the `FAR_FUTURE_EPOCH`. - let activation_epoch = if val.validator.activation_epoch.as_u64() == FAR_FUTURE_EPOCH { - None - } else { - Some(val.validator.activation_epoch.as_u64() as i32) - }; - // Only store `exit_epoch` if it is not the `FAR_FUTURE_EPOCH`. - let exit_epoch = if val.validator.exit_epoch.as_u64() == FAR_FUTURE_EPOCH { - None - } else { - Some(val.validator.exit_epoch.as_u64() as i32) - }; - validator_map.insert(WatchValidator { - index: val.index as i32, - public_key: WatchPK::from_pubkey(val.validator.pubkey), - status: val.status.to_string(), - activation_epoch, - exit_epoch, - }); - } - Ok(validator_map) -} diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs deleted file mode 100644 index e21cf151b11..00000000000 --- a/watch/tests/tests.rs +++ /dev/null @@ -1,1294 +0,0 @@ -#![recursion_limit = "256"] -#![cfg(unix)] - -use beacon_chain::{ - test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, - ChainConfig, -}; -use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; -use http_api::test_utils::{create_api_server, ApiServer}; -use log::error; -use logging::test_logger; -use network::NetworkReceivers; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; -use std::collections::HashMap; -use std::env; -use std::time::Duration; -use testcontainers::{clients::Cli, core::WaitFor, Image, RunnableImage}; -use tokio::{runtime, task::JoinHandle}; -use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; -use types::{Hash256, MainnetEthSpec, Slot}; -use unused_port::unused_tcp4_port; -use url::Url; -use watch::{ - client::WatchHttpClient, - config::Config, - database::{self, Config as DatabaseConfig, PgPool, WatchSlot}, - server::{start_server, Config as ServerConfig}, - updater::{handler::*, run_updater, Config as UpdaterConfig, WatchSpec}, -}; - -#[derive(Debug)] -pub struct Postgres(HashMap); - -impl Default for Postgres { - fn default() -> Self { - let mut env_vars = HashMap::new(); - env_vars.insert("POSTGRES_DB".to_owned(), "postgres".to_owned()); - env_vars.insert("POSTGRES_HOST_AUTH_METHOD".into(), "trust".into()); - - Self(env_vars) - } -} - -impl Image for Postgres { - type Args = (); - - fn name(&self) -> String { - "postgres".to_owned() - } - - fn tag(&self) -> String { - "11-alpine".to_owned() - } - - fn ready_conditions(&self) -> Vec { - vec![WaitFor::message_on_stderr( - "database system is ready to 
accept connections", - )] - } - - fn env_vars(&self) -> Box + '_> { - Box::new(self.0.iter()) - } -} - -type E = MainnetEthSpec; - -const VALIDATOR_COUNT: usize = 32; -const SLOTS_PER_EPOCH: u64 = 32; -const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); - -/// Set this environment variable to use a different hostname for connecting to -/// the database. Can be set to `host.docker.internal` for docker-in-docker -/// setups. -const WATCH_HOST_ENV_VARIABLE: &str = "WATCH_HOST"; - -fn build_test_config(config: &DatabaseConfig) -> PostgresConfig { - let mut postgres_config = PostgresConfig::new(); - postgres_config - .user(&config.user) - .password(&config.password) - .dbname(&config.default_dbname) - .host(&config.host) - .port(config.port) - .connect_timeout(Duration::from_millis(config.connect_timeout_millis)); - postgres_config -} - -async fn connect(config: &DatabaseConfig) -> (Client, JoinHandle<()>) { - let db_config = build_test_config(config); - let (client, conn) = db_config - .connect(NoTls) - .await - .expect("Could not connect to db"); - let connection = runtime::Handle::current().spawn(async move { - if let Err(e) = conn.await { - error!("Connection error {:?}", e); - } - }); - - (client, connection) -} - -pub async fn create_test_database(config: &DatabaseConfig) { - let (db, _) = connect(config).await; - - db.execute(&format!("CREATE DATABASE {};", config.dbname), &[]) - .await - .expect("Database creation failed"); -} - -pub fn get_host_from_env() -> String { - env::var(WATCH_HOST_ENV_VARIABLE).unwrap_or_else(|_| "localhost".to_string()) -} - -struct TesterBuilder { - pub harness: BeaconChainHarness>, - pub config: Config, - _bn_network_rx: NetworkReceivers, -} - -impl TesterBuilder { - pub async fn new() -> TesterBuilder { - let harness = BeaconChainHarness::builder(E::default()) - .default_spec() - .chain_config(ChainConfig { - reconstruct_historic_states: true, - ..ChainConfig::default() - }) - .logger(test_logger()) - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .build(); - - /* - * Spawn a Beacon Node HTTP API. - */ - let ApiServer { - server, - listening_socket: bn_api_listening_socket, - network_rx: _bn_network_rx, - .. - } = create_api_server( - harness.chain.clone(), - &harness.runtime, - harness.logger().clone(), - ) - .await; - tokio::spawn(server); - - /* - * Create a watch configuration - */ - let database_port = unused_tcp4_port().expect("Unable to find unused port."); - let server_port = 0; - let config = Config { - database: DatabaseConfig { - dbname: random_dbname(), - port: database_port, - host: get_host_from_env(), - ..Default::default() - }, - server: ServerConfig { - listen_port: server_port, - ..Default::default() - }, - updater: UpdaterConfig { - beacon_node_url: format!( - "http://{}:{}", - bn_api_listening_socket.ip(), - bn_api_listening_socket.port() - ), - ..Default::default() - }, - ..Default::default() - }; - - Self { - harness, - config, - _bn_network_rx, - } - } - pub async fn build(self, pool: PgPool) -> Tester { - /* - * Spawn a Watch HTTP API. - */ - let (addr, watch_server) = start_server(&self.config, SLOTS_PER_EPOCH, pool).unwrap(); - tokio::spawn(watch_server); - - /* - * Create a HTTP client to talk to the watch HTTP API. - */ - let client = WatchHttpClient { - client: reqwest::Client::new(), - server: Url::parse(&format!("http://{}:{}", addr.ip(), addr.port())).unwrap(), - }; - - /* - * Create a HTTP client to talk to the Beacon Node API. 
- */ - let beacon_node_url = SensitiveUrl::parse(&self.config.updater.beacon_node_url).unwrap(); - let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT)); - let spec = WatchSpec::mainnet("mainnet".to_string()); - - /* - * Build update service - */ - let updater = UpdateHandler::new(bn, spec, self.config.clone()) - .await - .unwrap(); - - Tester { - harness: self.harness, - client, - config: self.config, - updater, - _bn_network_rx: self._bn_network_rx, - } - } - async fn initialize_database(&self) -> PgPool { - create_test_database(&self.config.database).await; - database::utils::run_migrations(&self.config.database); - database::build_connection_pool(&self.config.database) - .expect("Could not build connection pool") - } -} - -struct Tester { - pub harness: BeaconChainHarness>, - pub client: WatchHttpClient, - pub config: Config, - pub updater: UpdateHandler, - _bn_network_rx: NetworkReceivers, -} - -impl Tester { - /// Extend the chain on the beacon chain harness. Do not update the beacon watch database. - pub async fn extend_chain(&mut self, num_blocks: u64) -> &mut Self { - self.harness.advance_slot(); - self.harness - .extend_chain( - num_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - self - } - - // Advance the slot clock without a block. This results in a skipped slot. - pub fn skip_slot(&mut self) -> &mut Self { - self.harness.advance_slot(); - self - } - - // Perform a single slot re-org. - pub async fn reorg_chain(&mut self) -> &mut Self { - let previous_slot = self.harness.get_current_slot(); - self.harness.advance_slot(); - let first_slot = self.harness.get_current_slot(); - self.harness - .extend_chain( - 1, - BlockStrategy::ForkCanonicalChainAt { - previous_slot, - first_slot, - }, - AttestationStrategy::AllValidators, - ) - .await; - self - } - - /// Run the watch updater service. 
- pub async fn run_update_service(&mut self, num_runs: usize) -> &mut Self { - for _ in 0..num_runs { - run_updater(self.config.clone()).await.unwrap(); - } - self - } - - pub async fn perform_head_update(&mut self) -> &mut Self { - self.updater.perform_head_update().await.unwrap(); - self - } - - pub async fn perform_backfill(&mut self) -> &mut Self { - self.updater.backfill_canonical_slots().await.unwrap(); - self - } - - pub async fn update_unknown_blocks(&mut self) -> &mut Self { - self.updater.update_unknown_blocks().await.unwrap(); - self - } - - pub async fn update_validator_set(&mut self) -> &mut Self { - self.updater.update_validator_set().await.unwrap(); - self - } - - pub async fn fill_suboptimal_attestations(&mut self) -> &mut Self { - self.updater.fill_suboptimal_attestations().await.unwrap(); - - self - } - - pub async fn backfill_suboptimal_attestations(&mut self) -> &mut Self { - self.updater - .backfill_suboptimal_attestations() - .await - .unwrap(); - - self - } - - pub async fn fill_block_rewards(&mut self) -> &mut Self { - self.updater.fill_block_rewards().await.unwrap(); - - self - } - - pub async fn backfill_block_rewards(&mut self) -> &mut Self { - self.updater.backfill_block_rewards().await.unwrap(); - - self - } - - pub async fn fill_block_packing(&mut self) -> &mut Self { - self.updater.fill_block_packing().await.unwrap(); - - self - } - - pub async fn backfill_block_packing(&mut self) -> &mut Self { - self.updater.backfill_block_packing().await.unwrap(); - - self - } - - pub async fn assert_canonical_slots_empty(&mut self) -> &mut Self { - let lowest_slot = self - .client - .get_lowest_canonical_slot() - .await - .unwrap() - .map(|slot| slot.slot.as_slot()); - - assert_eq!(lowest_slot, None); - - self - } - - pub async fn assert_lowest_canonical_slot(&mut self, expected: u64) -> &mut Self { - let slot = self - .client - .get_lowest_canonical_slot() - .await - .unwrap() - .unwrap() - .slot - .as_slot(); - - assert_eq!(slot, Slot::new(expected)); - - self - } - - pub async fn assert_highest_canonical_slot(&mut self, expected: u64) -> &mut Self { - let slot = self - .client - .get_highest_canonical_slot() - .await - .unwrap() - .unwrap() - .slot - .as_slot(); - - assert_eq!(slot, Slot::new(expected)); - - self - } - - pub async fn assert_canonical_slots_not_empty(&mut self) -> &mut Self { - self.client - .get_lowest_canonical_slot() - .await - .unwrap() - .unwrap(); - - self - } - - pub async fn assert_slot_is_skipped(&mut self, slot: u64) -> &mut Self { - assert!(self - .client - .get_beacon_blocks(BlockId::Slot(Slot::new(slot))) - .await - .unwrap() - .is_none()); - self - } - - pub async fn assert_all_validators_exist(&mut self) -> &mut Self { - assert_eq!( - self.client - .get_all_validators() - .await - .unwrap() - .unwrap() - .len(), - VALIDATOR_COUNT - ); - self - } - - pub async fn assert_lowest_block_has_proposer_info(&mut self) -> &mut Self { - let mut block = self - .client - .get_lowest_beacon_block() - .await - .unwrap() - .unwrap(); - - if block.slot.as_slot() == 0 { - block = self - .client - .get_next_beacon_block(block.root.as_hash()) - .await - .unwrap() - .unwrap() - } - - self.client - .get_proposer_info(BlockId::Root(block.root.as_hash())) - .await - .unwrap() - .unwrap(); - - self - } - - pub async fn assert_highest_block_has_proposer_info(&mut self) -> &mut Self { - let block = self - .client - .get_highest_beacon_block() - .await - .unwrap() - .unwrap(); - - self.client - .get_proposer_info(BlockId::Root(block.root.as_hash())) - .await - 
.unwrap() - .unwrap(); - - self - } - - pub async fn assert_lowest_block_has_block_rewards(&mut self) -> &mut Self { - let mut block = self - .client - .get_lowest_beacon_block() - .await - .unwrap() - .unwrap(); - - if block.slot.as_slot() == 0 { - block = self - .client - .get_next_beacon_block(block.root.as_hash()) - .await - .unwrap() - .unwrap() - } - - self.client - .get_block_reward(BlockId::Root(block.root.as_hash())) - .await - .unwrap() - .unwrap(); - - self - } - - pub async fn assert_highest_block_has_block_rewards(&mut self) -> &mut Self { - let block = self - .client - .get_highest_beacon_block() - .await - .unwrap() - .unwrap(); - - self.client - .get_block_reward(BlockId::Root(block.root.as_hash())) - .await - .unwrap() - .unwrap(); - - self - } - - pub async fn assert_lowest_block_has_block_packing(&mut self) -> &mut Self { - let mut block = self - .client - .get_lowest_beacon_block() - .await - .unwrap() - .unwrap(); - - while block.slot.as_slot() <= SLOTS_PER_EPOCH { - block = self - .client - .get_next_beacon_block(block.root.as_hash()) - .await - .unwrap() - .unwrap() - } - - self.client - .get_block_packing(BlockId::Root(block.root.as_hash())) - .await - .unwrap() - .unwrap(); - - self - } - - pub async fn assert_highest_block_has_block_packing(&mut self) -> &mut Self { - let block = self - .client - .get_highest_beacon_block() - .await - .unwrap() - .unwrap(); - - self.client - .get_block_packing(BlockId::Root(block.root.as_hash())) - .await - .unwrap() - .unwrap(); - - self - } - - /// Check that the canonical chain in watch matches that of the harness. Also check that all - /// canonical blocks can be retrieved. - pub async fn assert_canonical_chain_consistent(&mut self, last_slot: u64) -> &mut Self { - let head_root = self.harness.chain.head_beacon_block_root(); - let mut chain: Vec<(Hash256, Slot)> = self - .harness - .chain - .rev_iter_block_roots_from(head_root) - .unwrap() - .map(Result::unwrap) - .collect(); - - // `chain` contains skip slots, but the `watch` API will not return blocks that do not - // exist. - // We need to filter them out. - chain.reverse(); - chain.dedup_by(|(hash1, _), (hash2, _)| hash1 == hash2); - - // Remove any slots below `last_slot` since it is known that the database has not - // backfilled past it. - chain.retain(|(_, slot)| slot.as_u64() >= last_slot); - - for (root, slot) in &chain { - let block = self - .client - .get_beacon_blocks(BlockId::Root(*root)) - .await - .unwrap() - .unwrap(); - assert_eq!(block.slot.as_slot(), *slot); - } - - self - } - - /// Check that every block in the `beacon_blocks` table has corresponding entries in the - /// `proposer_info`, `block_rewards` and `block_packing` tables. 
- pub async fn assert_all_blocks_have_metadata(&mut self) -> &mut Self { - let pool = database::build_connection_pool(&self.config.database).unwrap(); - - let mut conn = database::get_connection(&pool).unwrap(); - let highest_block_slot = database::get_highest_beacon_block(&mut conn) - .unwrap() - .unwrap() - .slot - .as_slot(); - let lowest_block_slot = database::get_lowest_beacon_block(&mut conn) - .unwrap() - .unwrap() - .slot - .as_slot(); - for slot in lowest_block_slot.as_u64()..=highest_block_slot.as_u64() { - let canonical_slot = database::get_canonical_slot(&mut conn, WatchSlot::new(slot)) - .unwrap() - .unwrap(); - if !canonical_slot.skipped { - database::get_block_rewards_by_slot(&mut conn, WatchSlot::new(slot)) - .unwrap() - .unwrap(); - database::get_proposer_info_by_slot(&mut conn, WatchSlot::new(slot)) - .unwrap() - .unwrap(); - database::get_block_packing_by_slot(&mut conn, WatchSlot::new(slot)) - .unwrap() - .unwrap(); - } - } - - self - } -} - -pub fn random_dbname() -> String { - let mut s: String = thread_rng() - .sample_iter(&Alphanumeric) - .take(8) - .map(char::from) - .collect(); - // Postgres gets weird about capitals in database names. - s.make_ascii_lowercase(); - format!("test_{}", s) -} - -#[cfg(unix)] -#[tokio::test] -async fn short_chain() { - let builder = TesterBuilder::new().await; - - let docker = Cli::default(); - let image = RunnableImage::from(Postgres::default()) - .with_mapped_port((builder.config.database.port, 5432)); - let _node = docker.run(image); - - let pool = builder.initialize_database().await; - let mut tester = builder.build(pool).await; - - tester - .extend_chain(16) - .await - .assert_canonical_slots_empty() - .await - .run_update_service(1) - .await - .assert_all_validators_exist() - .await - .assert_canonical_slots_not_empty() - .await - .assert_canonical_chain_consistent(0) - .await; -} - -#[cfg(unix)] -#[tokio::test] -async fn short_chain_sync_starts_on_skip_slot() { - let builder = TesterBuilder::new().await; - - let docker = Cli::default(); - let image = RunnableImage::from(Postgres::default()) - .with_mapped_port((builder.config.database.port, 5432)); - let _node = docker.run(image); - - let pool = builder.initialize_database().await; - let mut tester = builder.build(pool).await; - - tester - .skip_slot() - .skip_slot() - .extend_chain(6) - .await - .skip_slot() - .extend_chain(6) - .await - .skip_slot() - .assert_canonical_slots_empty() - .await - .run_update_service(1) - .await - .assert_all_validators_exist() - .await - .assert_canonical_slots_not_empty() - .await - .assert_canonical_chain_consistent(0) - .await - .assert_lowest_block_has_block_rewards() - .await - .assert_highest_block_has_block_rewards() - .await; -} - -#[cfg(unix)] -#[tokio::test] -async fn short_chain_with_skip_slot() { - let builder = TesterBuilder::new().await; - - let docker = Cli::default(); - let image = RunnableImage::from(Postgres::default()) - .with_mapped_port((builder.config.database.port, 5432)); - let _node = docker.run(image); - - let pool = builder.initialize_database().await; - let mut tester = builder.build(pool).await; - - tester - .extend_chain(5) - .await - .assert_canonical_slots_empty() - .await - .run_update_service(1) - .await - .assert_all_validators_exist() - .await - .assert_canonical_slots_not_empty() - .await - .assert_highest_canonical_slot(5) - .await - .assert_lowest_canonical_slot(0) - .await - .assert_canonical_chain_consistent(0) - .await - .skip_slot() - .extend_chain(1) - .await - .run_update_service(1) - .await - 
.assert_all_validators_exist() - .await - .assert_highest_canonical_slot(7) - .await - .assert_slot_is_skipped(6) - .await - .assert_canonical_chain_consistent(0) - .await; -} - -#[cfg(unix)] -#[tokio::test] -async fn short_chain_with_reorg() { - let builder = TesterBuilder::new().await; - - let docker = Cli::default(); - let image = RunnableImage::from(Postgres::default()) - .with_mapped_port((builder.config.database.port, 5432)); - let _node = docker.run(image); - - let pool = builder.initialize_database().await; - let mut tester = builder.build(pool).await; - - tester - .extend_chain(5) - .await - .assert_canonical_slots_empty() - .await - .run_update_service(1) - .await - .assert_all_validators_exist() - .await - .assert_canonical_slots_not_empty() - .await - .assert_highest_canonical_slot(5) - .await - .assert_lowest_canonical_slot(0) - .await - .assert_canonical_chain_consistent(0) - .await - .skip_slot() - .reorg_chain() - .await - .extend_chain(1) - .await - .run_update_service(1) - .await - .assert_all_validators_exist() - .await - .assert_highest_canonical_slot(8) - .await - .assert_slot_is_skipped(6) - .await - .assert_canonical_chain_consistent(0) - .await; -} - -#[cfg(unix)] -#[tokio::test] -async fn chain_grows() { - let builder = TesterBuilder::new().await; - - let docker = Cli::default(); - let image = RunnableImage::from(Postgres::default()) - .with_mapped_port((builder.config.database.port, 5432)); - let _node = docker.run(image); - - let pool = builder.initialize_database().await; - let mut tester = builder.build(pool).await; - - // Apply four blocks to the chain. - tester - .extend_chain(4) - .await - .perform_head_update() - .await - // Head update should insert the head block. - .assert_highest_canonical_slot(4) - .await - // And also backfill to the epoch boundary. - .assert_lowest_canonical_slot(0) - .await - // Fill back to genesis. - .perform_backfill() - .await - // All blocks should be present. - .assert_lowest_canonical_slot(0) - .await - .assert_highest_canonical_slot(4) - .await - // Apply one block to the chain. - .extend_chain(1) - .await - .perform_head_update() - .await - // All blocks should be present. - .assert_lowest_canonical_slot(0) - .await - .assert_highest_canonical_slot(5) - .await - // Apply two blocks to the chain. - .extend_chain(2) - .await - // Update the head. - .perform_head_update() - .await - // All blocks should be present. - .assert_lowest_canonical_slot(0) - .await - .assert_highest_canonical_slot(7) - .await - .update_validator_set() - .await - // Insert all blocks. - .update_unknown_blocks() - .await - // Check the chain is consistent - .assert_canonical_chain_consistent(0) - .await; -} - -#[cfg(unix)] -#[tokio::test] -#[allow(clippy::large_stack_frames)] -async fn chain_grows_with_metadata() { - let builder = TesterBuilder::new().await; - - let docker = Cli::default(); - let image = RunnableImage::from(Postgres::default()) - .with_mapped_port((builder.config.database.port, 5432)); - let _node = docker.run(image); - - let pool = builder.initialize_database().await; - let mut tester = builder.build(pool).await; - - tester - // Apply four blocks to the chain. - .extend_chain(4) - .await - .perform_head_update() - .await - // Head update should insert the head block. - .assert_highest_canonical_slot(4) - .await - // And also backfill to the epoch boundary. - .assert_lowest_canonical_slot(0) - .await - // Fill back to genesis. 
- .perform_backfill() - .await - // Insert all validators - .update_validator_set() - .await - // Insert all blocks. - .update_unknown_blocks() - .await - // All validators should be present. - .assert_all_validators_exist() - .await - // Check the chain is consistent - .assert_canonical_chain_consistent(0) - .await - // Get other chain data. - // Backfill before forward fill to ensure order is arbitrary. - .backfill_block_rewards() - .await - .fill_block_rewards() - .await - // All blocks should be present. - .assert_lowest_canonical_slot(0) - .await - .assert_highest_canonical_slot(4) - .await - // All rewards should be present. - .assert_lowest_block_has_block_rewards() - .await - .assert_highest_block_has_block_rewards() - .await - // All proposers should be present. - .assert_lowest_block_has_proposer_info() - .await - .assert_highest_block_has_proposer_info() - .await - // Apply one block to the chain. - .extend_chain(1) - .await - .perform_head_update() - .await - // All blocks should be present. - .assert_lowest_canonical_slot(0) - .await - .assert_highest_canonical_slot(5) - .await - // Apply two blocks to the chain. - .extend_chain(2) - .await - // Update the head. - .perform_head_update() - .await - // All blocks should be present. - .assert_lowest_canonical_slot(0) - .await - .assert_highest_canonical_slot(7) - .await - .update_validator_set() - .await - // Insert all blocks. - .update_unknown_blocks() - .await - // Check the chain is consistent - .assert_canonical_chain_consistent(0) - .await - // Get other chain data. - .fill_block_rewards() - .await - .backfill_block_rewards() - .await - // All rewards should be present. - .assert_lowest_block_has_block_rewards() - .await - .assert_highest_block_has_block_rewards() - .await - // All proposers should be present. - .assert_lowest_block_has_proposer_info() - .await - .assert_highest_block_has_proposer_info() - .await; -} - -#[cfg(unix)] -#[tokio::test] -#[allow(clippy::large_stack_frames)] -async fn chain_grows_with_metadata_and_multiple_skip_slots() { - let builder = TesterBuilder::new().await; - - let docker = Cli::default(); - let image = RunnableImage::from(Postgres::default()) - .with_mapped_port((builder.config.database.port, 5432)); - let _node = docker.run(image); - - let pool = builder.initialize_database().await; - let mut tester = builder.build(pool).await; - - // Apply four blocks to the chain. - tester - .extend_chain(4) - .await - .perform_head_update() - .await - // Head update should insert the head block. - .assert_highest_canonical_slot(4) - // And also backfill to the epoch boundary. - .await - .assert_lowest_canonical_slot(0) - .await - // Fill back to genesis. - .perform_backfill() - .await - // Insert all validators - .update_validator_set() - .await - // Insert all blocks. - .update_unknown_blocks() - .await - // All validators should be present. - .assert_all_validators_exist() - .await - // Check the chain is consistent. - .assert_canonical_chain_consistent(0) - .await - // Get other chain data. - .fill_block_rewards() - .await - .backfill_block_rewards() - .await - // All blocks should be present. - .assert_lowest_canonical_slot(0) - .await - .assert_highest_canonical_slot(4) - .await - // All rewards should be present. - .assert_lowest_block_has_block_rewards() - .await - .assert_highest_block_has_block_rewards() - .await - // All proposers should be present. - .assert_lowest_block_has_proposer_info() - .await - .assert_highest_block_has_proposer_info() - .await - // Add multiple skip slots. 
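// A tiny model of the slot accounting these harness calls rely on:
// `extend_chain(n)` advances the head by `n` slots with a block at each,
// while `skip_slot()` advances the slot clock by one without a block, so each
// skip pushes the next block one slot higher. The struct is a stand-in for
// the test harness, not part of the real test utilities.
#[derive(Default)]
struct SlotClock {
    head_slot: u64,
}

impl SlotClock {
    fn extend_chain(&mut self, blocks: u64) -> &mut Self {
        self.head_slot += blocks;
        self
    }

    fn skip_slot(&mut self) -> &mut Self {
        self.head_slot += 1;
        self
    }
}

fn main() {
    // Mirrors the chain built in this test: four blocks, three skip slots,
    // one block, then two more blocks.
    let mut clock = SlotClock::default();
    assert_eq!(clock.extend_chain(4).head_slot, 4);
    clock.skip_slot().skip_slot().skip_slot().extend_chain(1);
    assert_eq!(clock.head_slot, 8);
    clock.extend_chain(2);
    assert_eq!(clock.head_slot, 10);
}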
- .skip_slot() - .skip_slot() - .skip_slot() - // Apply one block to the chain. - .extend_chain(1) - .await - .perform_head_update() - .await - // All blocks should be present. - .assert_lowest_canonical_slot(0) - .await - .assert_highest_canonical_slot(8) - .await - // Apply two blocks to the chain. - .extend_chain(2) - .await - // Update the head. - .perform_head_update() - .await - // All validators should be present. - .assert_all_validators_exist() - .await - // All blocks should be present. - .assert_lowest_canonical_slot(0) - .await - .assert_highest_canonical_slot(10) - .await - .update_validator_set() - .await - // Insert all blocks. - .update_unknown_blocks() - .await - // Check the chain is consistent - .assert_canonical_chain_consistent(0) - .await - // Get other chain data. - // Backfill before forward fill to ensure order is arbitrary. - .backfill_block_rewards() - .await - .fill_block_rewards() - .await - // All rewards should be present. - .assert_lowest_block_has_block_rewards() - .await - .assert_highest_block_has_block_rewards() - .await - // All proposers should be present. - .assert_lowest_block_has_proposer_info() - .await - .assert_highest_block_has_proposer_info() - .await; -} - -#[cfg(unix)] -#[tokio::test] -async fn chain_grows_to_second_epoch() { - let builder = TesterBuilder::new().await; - - let docker = Cli::default(); - let image = RunnableImage::from(Postgres::default()) - .with_mapped_port((builder.config.database.port, 5432)); - let _node = docker.run(image); - - let pool = builder.initialize_database().await; - let mut tester = builder.build(pool).await; - // Apply 40 blocks to the chain. - tester - .extend_chain(40) - .await - .perform_head_update() - .await - // Head update should insert the head block. - .assert_highest_canonical_slot(40) - .await - // And also backfill to the epoch boundary. - .assert_lowest_canonical_slot(32) - .await - // Fill back to genesis. - .perform_backfill() - .await - // Insert all validators - .update_validator_set() - .await - // Insert all blocks. - .update_unknown_blocks() - .await - // All validators should be present. - .assert_all_validators_exist() - .await - // Check the chain is consistent. - .assert_canonical_chain_consistent(0) - .await - // Get block packings. - .fill_block_packing() - .await - .backfill_block_packing() - .await - // All blocks should be present. - .assert_lowest_canonical_slot(0) - .await - .assert_highest_canonical_slot(40) - .await - // All packings should be present. - .assert_lowest_block_has_block_packing() - .await - .assert_highest_block_has_block_packing() - .await - // Skip a slot - .skip_slot() - // Apply two blocks to the chain. - .extend_chain(2) - .await - // Update the head. - .perform_head_update() - .await - // All blocks should be present. - .assert_lowest_canonical_slot(0) - .await - .assert_highest_canonical_slot(43) - .await - .update_validator_set() - .await - // Insert all blocks. - .update_unknown_blocks() - .await - // Update new block_packing - // Backfill before forward fill to ensure order is arbitrary - .backfill_block_packing() - .await - .fill_block_packing() - .await - // All packings should be present. 
- .assert_lowest_block_has_block_packing() - .await - .assert_highest_block_has_block_packing() - .await - // Check the chain is consistent - .assert_canonical_chain_consistent(0) - .await; -} - -#[cfg(unix)] -#[tokio::test] -async fn large_chain() { - let builder = TesterBuilder::new().await; - - let docker = Cli::default(); - let image = RunnableImage::from(Postgres::default()) - .with_mapped_port((builder.config.database.port, 5432)); - let _node = docker.run(image); - - let pool = builder.initialize_database().await; - let mut tester = builder.build(pool).await; - // Apply 40 blocks to the chain. - tester - .extend_chain(400) - .await - .perform_head_update() - .await - // Head update should insert the head block. - .assert_highest_canonical_slot(400) - .await - // And also backfill to the epoch boundary. - .assert_lowest_canonical_slot(384) - .await - // Backfill 2 epochs as per default config. - .perform_backfill() - .await - // Insert all validators - .update_validator_set() - .await - // Insert all blocks. - .update_unknown_blocks() - .await - // All validators should be present. - .assert_all_validators_exist() - .await - // Check the chain is consistent. - .assert_canonical_chain_consistent(384) - .await - // Get block rewards and proposer info. - .fill_block_rewards() - .await - .backfill_block_rewards() - .await - // Get block packings. - .fill_block_packing() - .await - .backfill_block_packing() - .await - // Should have backfilled 2 more epochs. - .assert_lowest_canonical_slot(320) - .await - .assert_highest_canonical_slot(400) - .await - // All rewards should be present. - .assert_lowest_block_has_block_rewards() - .await - .assert_highest_block_has_block_rewards() - .await - // All proposers should be present. - .assert_lowest_block_has_proposer_info() - .await - .assert_highest_block_has_proposer_info() - .await - // All packings should be present. - .assert_lowest_block_has_block_packing() - .await - .assert_highest_block_has_block_packing() - .await - // Skip a slot - .skip_slot() - // Apply two blocks to the chain. - .extend_chain(2) - .await - // Update the head. - .perform_head_update() - .await - .perform_backfill() - .await - // Should have backfilled 2 more epochs - .assert_lowest_canonical_slot(256) - .await - .assert_highest_canonical_slot(403) - .await - // Update validators - .update_validator_set() - .await - // Insert all blocks. - .update_unknown_blocks() - .await - // All validators should be present. - .assert_all_validators_exist() - .await - // Get suboptimal attestations. - .fill_suboptimal_attestations() - .await - .backfill_suboptimal_attestations() - .await - // Get block rewards and proposer info. - .fill_block_rewards() - .await - .backfill_block_rewards() - .await - // Get block packing. - // Backfill before forward fill to ensure order is arbitrary. - .backfill_block_packing() - .await - .fill_block_packing() - .await - // All rewards should be present. - .assert_lowest_block_has_block_rewards() - .await - .assert_highest_block_has_block_rewards() - .await - // All proposers should be present. - .assert_lowest_block_has_proposer_info() - .await - .assert_highest_block_has_proposer_info() - .await - // All packings should be present. - .assert_lowest_block_has_block_packing() - .await - .assert_highest_block_has_block_packing() - .await - // Check the chain is consistent. - .assert_canonical_chain_consistent(256) - .await - // Check every block has rewards, proposer info and packing statistics. - .assert_all_blocks_have_metadata() - .await; -}
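// Revisiting `reverse_fill_canonical_slots` from `handler.rs` above, a
// standalone sketch of its skip-slot rule: iterate from the high slot down to
// the low slot and, for slots with no block, record the root of the nearest
// non-skipped slot below with `skipped == true` (the real code obtains that
// root from the `parent_root` of the header above the gap). Plain `u64` roots
// and a `HashMap` of known blocks stand in for the header lookups and
// database writes.
use std::collections::HashMap;

struct CanonicalSlot {
    slot: u64,
    root: u64,
    skipped: bool,
}

fn reverse_fill(blocks: &HashMap<u64, u64>, start_slot: u64, end_slot: u64) -> Vec<CanonicalSlot> {
    let mut out = Vec::new();
    for slot in (end_slot..=start_slot).rev() {
        match blocks.get(&slot) {
            Some(&root) => out.push(CanonicalSlot { slot, root, skipped: false }),
            None => {
                let root = (0..slot)
                    .rev()
                    .find_map(|s| blocks.get(&s).copied())
                    .unwrap_or(0);
                out.push(CanonicalSlot { slot, root, skipped: true });
            }
        }
    }
    out
}

fn main() {
    // Blocks exist at slots 5, 7 and 8; slot 6 was a skip slot.
    let blocks: HashMap<u64, u64> = [(5, 0x55), (7, 0x77), (8, 0x88)].into_iter().collect();
    let filled = reverse_fill(&blocks, 8, 5);
    assert_eq!(filled.len(), 4);
    // The skipped slot 6 carries the root of slot 5.
    assert!(filled.iter().any(|c| c.slot == 6 && c.root == 0x55 && c.skipped));
}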