diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 00000000000..f9478d13691
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,2 @@
+beacon_node/network/ @jxs
+beacon_node/lighthouse_network/ @jxs
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index cd45bd6d98f..e7682089731 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -13,8 +13,8 @@ concurrency:
   cancel-in-progress: true
 
 env:
-    DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
-    DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
+    DOCKER_PASSWORD: ${{ secrets.DH_KEY }}
+    DOCKER_USERNAME: ${{ secrets.DH_ORG }}
     # Enable self-hosted runners for the sigp repo only.
     SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}
 
diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml
index d496cc6348e..1cd2f24548e 100644
--- a/.github/workflows/local-testnet.yml
+++ b/.github/workflows/local-testnet.yml
@@ -40,7 +40,7 @@ jobs:
         run: |
           echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
           sudo apt update
-          sudo apt install -y kurtosis-cli=1.3.1
+          sudo apt install -y kurtosis-cli
           kurtosis analytics disable
 
       - name: Download Docker image artifact
@@ -86,7 +86,7 @@
         run: |
           echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
           sudo apt update
-          sudo apt install -y kurtosis-cli=1.3.1
+          sudo apt install -y kurtosis-cli
           kurtosis analytics disable
 
       - name: Download Docker image artifact
@@ -121,7 +121,7 @@
         run: |
           echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
           sudo apt update
-          sudo apt install -y kurtosis-cli=1.3.1
+          sudo apt install -y kurtosis-cli
           kurtosis analytics disable
 
       - name: Download Docker image artifact
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index f1ec2e46551..cfba601fad6 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -10,8 +10,8 @@ concurrency:
   cancel-in-progress: true
 
 env:
-    DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
-    DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
+    DOCKER_PASSWORD: ${{ secrets.DH_KEY }}
+    DOCKER_USERNAME: ${{ secrets.DH_ORG }}
     REPO_NAME: ${{ github.repository_owner }}/lighthouse
     IMAGE_NAME: ${{ github.repository_owner }}/lighthouse
     # Enable self-hosted runners for the sigp repo only.
diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml
index d6ef1809341..45f3b757e74 100644
--- a/.github/workflows/test-suite.yml
+++ b/.github/workflows/test-suite.yml
@@ -83,6 +83,11 @@ jobs:
     runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest'  }}
     steps:
       - uses: actions/checkout@v4
+      # Set Java version to 21 (required since Web3Signer 24.12.0).
+      - uses: actions/setup-java@v4
+        with:
+          distribution: 'temurin'
+          java-version: '21'
       - name: Get latest version of stable Rust
         if: env.SELF_HOSTED_RUNNERS == 'false'
         uses: moonrepo/setup-rust@v1
@@ -345,7 +350,7 @@
       - name: Check formatting with cargo fmt
         run: make cargo-fmt
       - name: Lint code for quality and style with Clippy
-        run: make lint
+        run: make lint-full
       - name: Certify Cargo.lock freshness
         run: git diff --exit-code Cargo.lock
       - name: Typecheck benchmark code without running it
@@ -358,6 +363,8 @@
         run: CARGO_HOME=$(readlink -f $HOME) make vendor
       - name: Markdown-linter
         run: make mdlint
+      - name: Spell-check
+        uses: rojopolis/spellcheck-github-actions@v0
   check-msrv:
     name: check-msrv
     runs-on: ubuntu-latest
@@ -421,6 +428,21 @@
          cache-target: release
       - name: Run Makefile to trigger the bash script
         run: make cli-local
+  cargo-sort:
+    name: cargo-sort
+    needs: [check-labels]
+    if: needs.check-labels.outputs.skip_ci != 'true'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Get latest version of stable Rust
+        uses: moonrepo/setup-rust@v1
+        with:
+          channel: stable
+          cache-target: release
+          bins: cargo-sort
+      - name: Run cargo sort to check if Cargo.toml files are sorted
+        run: cargo sort --check --workspace
   # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether
   # a PR is safe to merge. New jobs should be added here.
   test-suite-success:
@@ -448,6 +470,7 @@
       'compile-with-beta-compiler',
       'cli-check',
       'lockbud',
+      'cargo-sort',
     ]
     steps:
       - uses: actions/checkout@v4
diff --git a/.spellcheck.yml b/.spellcheck.yml
new file mode 100644
index 00000000000..692bc4d176c
--- /dev/null
+++ b/.spellcheck.yml
@@ -0,0 +1,35 @@
+matrix:
+- name: Markdown
+  sources:
+  - './book/**/*.md'
+  - 'README.md'
+  - 'CONTRIBUTING.md'
+  - 'SECURITY.md'
+  - './scripts/local_testnet/README.md'
+  default_encoding: utf-8
+  aspell:
+    lang: en
+  dictionary:
+    wordlists:
+    - wordlist.txt
+    encoding: utf-8
+  pipeline:
+  - pyspelling.filters.url:
+  - pyspelling.filters.markdown:
+      markdown_extensions:
+      - pymdownx.superfences:
+      - pymdownx.highlight:
+      - pymdownx.striphtml:
+      - pymdownx.magiclink:
+  - pyspelling.filters.html:
+      comments: false
+      ignores:
+      - code
+      - pre
+  - pyspelling.filters.context:
+      context_visible_first: true
+      delimiters:
+      # Ignore hex strings
+      - open: '0x[a-fA-F0-9]'
+        close: '[^a-fA-F0-9]'
+
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3c53558a100..4cad219c89f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -85,7 +85,7 @@ steps:
 5. Commit your changes and push them to your fork with `$ git push origin
    your_feature_name`.
 6. Go to your fork on github.com and use the web interface to create a pull
-   request into the sigp/lighthouse repo.
+   request into the sigp/lighthouse repository.
 
 From there, the repository maintainers will review the PR and either accept it
 or provide some constructive feedback.
diff --git a/Cargo.lock b/Cargo.lock index b7ba237ac76..c62e9fbc878 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -36,6 +36,7 @@ dependencies = [ "tokio", "types", "validator_dir", + "zeroize", ] [[package]] @@ -59,9 +60,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] @@ -149,9 +150,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.18" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" [[package]] name = "alloy-consensus" @@ -177,9 +178,9 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d319bb544ca6caeab58c39cea8921c55d924d4f68f2c60f24f914673f9a74a" +checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -204,9 +205,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.3" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "411aff151f2a73124ee473708e82ed51b2535f68928b6a1caa8bc1246ae6f7cd" +checksum = "9fce5dbd6a4f118eecc4719eaa9c7ffc31c315e6c5ccde3642db927802312425" dependencies = [ "alloy-rlp", "arbitrary", @@ -215,16 +216,22 @@ dependencies = [ "const-hex", "derive_arbitrary", "derive_more 1.0.0", + "foldhash", "getrandom", + "hashbrown 0.15.1", "hex-literal", + "indexmap 2.6.0", "itoa", "k256 0.13.4", "keccak-asm", + "paste", "proptest", "proptest-derive", "rand", "ruint", + "rustc-hash 2.0.0", "serde", + "sha3 0.10.8", "tiny-keccak", ] @@ -247,7 +254,7 @@ checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -273,9 +280,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -288,49 +295,49 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = 
"79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" dependencies = [ "derive_arbitrary", ] @@ -504,7 +511,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -516,7 +523,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", "synstructure", ] @@ -528,7 +535,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -550,9 +557,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" dependencies = [ "async-lock", "cfg-if", @@ -561,7 +568,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.37", + "rustix 0.38.41", "slab", "tracing", "windows-sys 0.59.0", @@ -580,13 +587,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.82" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -643,20 +650,20 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.6" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", "axum-core", @@ -665,7 +672,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.1", "hyper-util", "itoa", "matchit", @@ -678,9 +685,9 @@ 
dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tokio", - "tower 0.5.1", + "tower", "tower-layer", "tower-service", "tracing", @@ -688,9 +695,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", @@ -701,7 +708,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tower-layer", "tower-service", "tracing", @@ -827,7 +834,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "5.3.0" +version = "6.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -842,7 +849,7 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper 1.4.1", + "hyper 1.5.1", "lighthouse_network", "monitoring_api", "node_test_rig", @@ -907,9 +914,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.69.4" +version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ "bitflags 2.6.0", "cexpr", @@ -924,7 +931,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.77", + "syn 2.0.89", "which", ] @@ -1072,7 +1079,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "5.3.0" +version = "6.0.1" dependencies = [ "beacon_node", "bytes", @@ -1139,9 +1146,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" dependencies = [ "serde", ] @@ -1211,7 +1218,7 @@ dependencies = [ "semver 1.0.23", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -1222,9 +1229,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.21" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" dependencies = [ "jobserver", "libc", @@ -1348,9 +1355,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.18" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" +checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" dependencies = [ "clap_builder", "clap_derive", @@ -1358,9 +1365,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.18" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" dependencies = [ "anstream", "anstyle", @@ -1378,14 +1385,14 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] name = 
"clap_lex" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" [[package]] name = "clap_utils" @@ -1412,7 +1419,6 @@ dependencies = [ "directory", "dirs", "environment", - "error-chain", "eth1", "eth2", "eth2_config", @@ -1456,9 +1462,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "compare_fields" @@ -1487,9 +1493,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.12.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" +checksum = "487981fa1af147182687064d0a2c336586d337a606595ced9ffb0c685c250c73" dependencies = [ "cfg-if", "cpufeatures", @@ -1543,9 +1549,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -1794,7 +1800,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -1842,7 +1848,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -1864,7 +1870,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -1889,9 +1895,9 @@ dependencies = [ [[package]] name = "dary_heap" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7762d17f1241643615821a8455a0b2c3e803784b058693d990b11f2dce25a0ca" +checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728" [[package]] name = "data-encoding" @@ -1942,16 +1948,6 @@ version = "0.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" -[[package]] -name = "delay_map" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" -dependencies = [ - "futures", - "tokio-util", -] - [[package]] name = "delay_map" version = "0.4.0" @@ -2034,13 +2030,13 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2053,7 +2049,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2073,15 +2069,15 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", "unicode-xid", ] [[package]] name = "diesel" -version = "2.2.4" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "158fe8e2e68695bd615d7e4f3227c0727b151330d3e253b525086c348d055d5e" +checksum = "cbf9649c05e0a9dbd6d0b0b8301db5182b972d0fd02f0a7c6736cf632d7c0fd5" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -2101,7 +2097,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2121,7 +2117,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2206,7 +2202,7 @@ dependencies = [ "alloy-rlp", "arrayvec", "ctr 0.9.2", - "delay_map 0.4.0", + "delay_map", "enr", "fnv", "futures", @@ -2236,7 +2232,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2267,7 +2263,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2412,9 +2408,9 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.34" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] @@ -2447,7 +2443,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2519,16 +2515,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "error-chain" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "backtrace", - "version_check", -] - [[package]] name = "eth1" version = "0.2.0" @@ -2576,8 +2562,6 @@ dependencies = [ name = "eth2" version = "0.1.0" dependencies = [ - "account_utils", - "bytes", "derivative", "eth2_keystore", "ethereum_serde_utils", @@ -2585,7 +2569,6 @@ dependencies = [ "ethereum_ssz_derive", "futures", "futures-util", - "libsecp256k1", "lighthouse_network", "mediatype", "pretty_reqwest_error", @@ -2593,7 +2576,7 @@ dependencies = [ "proto_array", "psutil", "reqwest", - "ring 0.16.20", + "reqwest-eventsource", "sensitive_url", "serde", "serde_json", @@ -2602,6 +2585,7 @@ dependencies = [ "store", "tokio", "types", + "zeroize", ] [[package]] @@ -2718,7 +2702,7 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "thiserror", + "thiserror 1.0.69", "uint 0.9.5", ] @@ -2735,7 +2719,7 @@ dependencies = [ "serde", "serde_json", "sha3 0.10.8", - "thiserror", + "thiserror 1.0.69", "uint 0.9.5", ] @@ -2841,7 +2825,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -2860,7 +2844,7 @@ dependencies = [ "pin-project", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -2927,7 +2911,7 @@ dependencies = [ "serde_json", "strum", "syn 1.0.109", - "thiserror", + "thiserror 1.0.69", "tiny-keccak", "unicode-xid", ] @@ -2955,7 +2939,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", "tracing-futures", @@ -2994,6 +2978,17 @@ dependencies = [ 
"pin-project-lite", ] +[[package]] +name = "eventsource-stream" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74fef4569247a5f429d9156b9d0a2599914385dd189c539334c625d8099d90ab" +dependencies = [ + "futures-core", + "nom", + "pin-project-lite", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" @@ -3094,9 +3089,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "fastrlp" @@ -3116,7 +3111,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" dependencies = [ "libc", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3204,9 +3199,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", "libz-sys", @@ -3219,6 +3214,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "foreign-types" version = "0.3.2" @@ -3283,9 +3284,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -3324,9 +3325,9 @@ checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -3342,9 +3343,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.3.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" dependencies = [ "futures-core", "pin-project-lite", @@ -3358,7 +3359,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -3368,7 +3369,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ 
"futures-io", - "rustls 0.23.13", + "rustls 0.23.18", "rustls-pki-types", ] @@ -3384,22 +3385,15 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" -[[package]] -name = "futures-ticker" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" -dependencies = [ - "futures", - "futures-timer", - "instant", -] - [[package]] name = "futures-timer" version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" +dependencies = [ + "gloo-timers", + "send_wrapper 0.4.0", +] [[package]] name = "futures-util" @@ -3485,9 +3479,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git-version" @@ -3506,7 +3500,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -3515,6 +3509,18 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "gossipsub" version = "0.5.0" @@ -3527,7 +3533,6 @@ dependencies = [ "either", "fnv", "futures", - "futures-ticker", "futures-timer", "getrandom", "hashlink 0.9.1", @@ -3594,7 +3599,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3642,6 +3647,18 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", + "serde", +] + [[package]] name = "hashers" version = "1.0.1" @@ -3765,7 +3782,7 @@ dependencies = [ "once_cell", "rand", "socket2", - "thiserror", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -3788,7 +3805,7 @@ dependencies = [ "rand", "resolv-conf", "smallvec", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -3987,9 +4004,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -4005,9 +4022,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = 
"8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" dependencies = [ "bytes", "futures-channel", @@ -4029,9 +4046,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", @@ -4054,7 +4071,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", @@ -4067,7 +4084,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.30", + "hyper 0.14.31", "native-tls", "tokio", "tokio-native-tls", @@ -4075,18 +4092,17 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-util", "http 1.1.0", "http-body 1.0.1", - "hyper 1.4.1", + "hyper 1.5.1", "pin-project-lite", "tokio", - "tower 0.4.13", "tower-service", ] @@ -4113,6 +4129,124 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + 
"icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -4131,12 +4265,23 @@ dependencies = [ [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", ] [[package]] @@ -4151,9 +4296,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" +checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" dependencies = [ "async-io", "core-foundation", @@ -4162,8 +4307,12 @@ dependencies = [ "if-addrs", "ipnet", "log", + "netlink-packet-core", + "netlink-packet-route", + "netlink-proto", + "netlink-sys", "rtnetlink", - "system-configuration", + "system-configuration 0.6.1", "tokio", "windows", ] @@ -4179,7 +4328,7 @@ dependencies = [ "bytes", "futures", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "log", "rand", "tokio", @@ -4202,7 +4351,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.6.12", + "parity-scale-codec 3.7.0", ] [[package]] @@ -4234,13 +4383,13 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] [[package]] @@ -4261,12 +4410,14 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ + "arbitrary", 
"equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.1", + "serde", ] [[package]] @@ -4292,6 +4443,7 @@ dependencies = [ "url", "validator_dir", "validator_metrics", + "zeroize", ] [[package]] @@ -4358,9 +4510,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is-terminal" @@ -4408,9 +4560,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" [[package]] name = "jobserver" @@ -4423,9 +4575,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -4537,7 +4689,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "5.3.0" +version = "6.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -4596,9 +4748,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.158" +version = "0.2.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" [[package]] name = "libflate" @@ -4636,9 +4788,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libmdbx" @@ -4652,7 +4804,7 @@ dependencies = [ "libc", "mdbx-sys", "parking_lot 0.12.3", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -4684,7 +4836,7 @@ dependencies = [ "multiaddr", "pin-project", "rw-stream-sink", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -4732,7 +4884,7 @@ dependencies = [ "rand", "rw-stream-sink", "smallvec", - "thiserror", + "thiserror 1.0.69", "tracing", "unsigned-varint 0.8.0", "void", @@ -4773,16 +4925,16 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "smallvec", - "thiserror", + "thiserror 1.0.69", "tracing", "void", ] [[package]] name = "libp2p-identity" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" +checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" dependencies = [ "asn1_der", "bs58 0.5.1", @@ -4795,9 +4947,8 @@ dependencies = [ "rand", "sec1 0.7.3", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.69", "tracing", - "void", "zeroize", ] @@ -4877,7 +5028,7 @@ dependencies = [ "sha2 0.10.8", "snow", "static_assertions", - "thiserror", + "thiserror 1.0.69", "tracing", "x25519-dalek", "zeroize", @@ -4916,9 +5067,9 @@ dependencies = [ "quinn", "rand", "ring 0.17.8", - "rustls 0.23.13", + "rustls 0.23.18", 
"socket2", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -4956,7 +5107,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -4988,9 +5139,9 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.13", + "rustls 0.23.18", "rustls-webpki 0.101.7", - "thiserror", + "thiserror 1.0.69", "x509-parser", "yasna", ] @@ -5020,10 +5171,10 @@ dependencies = [ "either", "futures", "libp2p-core", - "thiserror", + "thiserror 1.0.69", "tracing", "yamux 0.12.1", - "yamux 0.13.3", + "yamux 0.13.4", ] [[package]] @@ -5108,7 +5259,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "5.3.0" +version = "6.0.1" dependencies = [ "account_manager", "account_utils", @@ -5147,6 +5298,7 @@ dependencies = [ "validator_client", "validator_dir", "validator_manager", + "zeroize", ] [[package]] @@ -5157,12 +5309,11 @@ dependencies = [ "alloy-rlp", "async-channel", "bytes", - "delay_map 0.3.0", + "delay_map", "directory", "dirs", "discv5", "either", - "error-chain", "ethereum_ssz", "ethereum_ssz_derive", "fnv", @@ -5232,6 +5383,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" + [[package]] name = "lmdb-rkv" version = "0.14.0" @@ -5300,11 +5457,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.1", ] [[package]] @@ -5433,18 +5590,18 @@ dependencies = [ [[package]] name = "metastruct" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00a5ba4a0f3453c31c397b214e1675d95b697c33763aa58add57ea833424384" +checksum = "d74f54f231f9a18d77393ecc5cc7ab96709b2a61ee326c2b2b291009b0cc5a07" dependencies = [ "metastruct_macro", ] [[package]] name = "metastruct_macro" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c3a991d4536c933306e52f0e8ab303757185ec13a09d1f3e1cbde5a0d8410bf" +checksum = "985e7225f3a4dfbec47a0c6a730a874185fda840d365d7bbd6ba199dd81796d5" dependencies = [ "darling 0.13.4", "itertools 0.10.5", @@ -5580,9 +5737,9 @@ checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" [[package]] name = "multiaddr" -version = "0.18.1" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" dependencies = [ "arrayref", "byteorder", @@ -5593,7 +5750,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", "url", ] @@ -5610,12 +5767,12 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" +checksum = 
"cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" dependencies = [ "core2", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", ] [[package]] @@ -5651,21 +5808,20 @@ dependencies = [ [[package]] name = "netlink-packet-core" -version = "0.4.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345b8ab5bd4e71a2986663e88c56856699d060e78e152e6e9d7966fcd5491297" +checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" dependencies = [ "anyhow", "byteorder", - "libc", "netlink-packet-utils", ] [[package]] name = "netlink-packet-route" -version = "0.12.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" +checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" dependencies = [ "anyhow", "bitflags 1.3.2", @@ -5684,21 +5840,21 @@ dependencies = [ "anyhow", "byteorder", "paste", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "netlink-proto" -version = "0.10.0" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65b4b14489ab424703c092062176d52ba55485a89c076b4f9db05092b7223aa6" +checksum = "86b33524dc0968bfad349684447bfce6db937a9ac3332a1fe60c0c5a5ce63f21" dependencies = [ "bytes", "futures", "log", "netlink-packet-core", "netlink-sys", - "thiserror", + "thiserror 1.0.69", "tokio", ] @@ -5726,9 +5882,8 @@ dependencies = [ "beacon_chain", "beacon_processor", "bls", - "delay_map 0.3.0", + "delay_map", "derivative", - "error-chain", "eth2", "eth2_network_config", "ethereum_ssz", @@ -5776,6 +5931,17 @@ dependencies = [ "libc", ] +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", +] + [[package]] name = "nix" version = "0.29.0" @@ -5916,9 +6082,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] @@ -5934,9 +6100,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oneshot_broadcast" @@ -5984,9 +6150,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.66" +version = "0.10.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" dependencies = [ "bitflags 2.6.0", "cfg-if", @@ -6005,7 +6171,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -6016,18 +6182,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.3.2+3.3.2" +version = "300.4.1+3.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a211a18d945ef7e648cc6e0058f4c548ee46aab922ea203e0d30e966ea23647b" +checksum = "faa4eac4138c62414b5622d1b31c5c304f34b406b013c079c2bbc652fdd6678c" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.103" +version = "0.9.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" dependencies = [ "cc", "libc", @@ -6101,15 +6267,16 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.6.12", + "parity-scale-codec-derive 3.7.0", + "rustversion", "serde", ] @@ -6127,14 +6294,14 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] [[package]] @@ -6186,7 +6353,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.4", + "redox_syscall 0.5.7", "smallvec", "windows-targets 0.52.6", ] @@ -6256,12 +6423,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" +checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.69", "ucd-trie", ] @@ -6295,29 +6462,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -6347,9 +6514,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "platforms" @@ -6387,15 +6554,15 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.3" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.37", + "rustix 0.38.41", "tracing", "windows-sys 0.59.0", ] @@ -6486,12 +6653,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.20" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -6546,14 +6713,14 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.21", + "toml_edit 0.22.22", ] [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -6585,7 +6752,7 @@ dependencies = [ "memchr", "parking_lot 0.12.3", "protobuf", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -6608,7 +6775,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -6625,7 +6792,7 @@ dependencies = [ "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -6639,7 +6806,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -6676,7 +6843,7 @@ dependencies = [ "num_cpus", "once_cell", "platforms", - "thiserror", + "thiserror 1.0.69", "unescape", ] @@ -6703,7 +6870,7 @@ dependencies = [ "asynchronous-codec", "bytes", "quick-protobuf", - "thiserror", + "thiserror 1.0.69", "unsigned-varint 0.8.0", ] @@ -6731,9 +6898,9 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", "futures-io", @@ -6741,36 +6908,40 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.13", + "rustls 0.23.18", "socket2", - "thiserror", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "quinn-proto" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", + "getrandom", "rand", "ring 0.17.8", 
"rustc-hash 2.0.0", - "rustls 0.23.13", + "rustls 0.23.18", + "rustls-pki-types", "slab", - "thiserror", + "thiserror 2.0.3", "tinyvec", "tracing", + "web-time", ] [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2", @@ -6829,6 +7000,7 @@ dependencies = [ "libc", "rand_chacha", "rand_core", + "serde", ] [[package]] @@ -6893,9 +7065,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.4" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "074373f3e7e5d27d8741d19512232adb47be8622d3daef3a45bcae72050c3d2a" +checksum = "84b1de48a7cf7ba193e81e078d17ee2b786236eed1d3f7c60f8a09545efc4925" dependencies = [ "libc", ] @@ -6911,9 +7083,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.4" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -6926,19 +7098,19 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] @@ -6952,13 +7124,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -6969,9 +7141,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -6987,7 +7159,7 @@ dependencies = [ "h2", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.30", + "hyper 0.14.31", "hyper-rustls", "hyper-tls", "ipnet", @@ -7004,7 +7176,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "sync_wrapper 0.1.2", - "system-configuration", + "system-configuration 0.5.1", "tokio", "tokio-native-tls", "tokio-rustls 0.24.1", @@ -7019,6 +7191,22 @@ dependencies = [ "winreg", ] +[[package]] +name = "reqwest-eventsource" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f529a5ff327743addc322af460761dff5b50e0c826b9e6ac44c3195c50bb2026" +dependencies = [ + "eventsource-stream", + "futures-core", + "futures-timer", + "mime", + 
"nom", + "pin-project-lite", + "reqwest", + "thiserror 1.0.69", +] + [[package]] name = "resolv-conf" version = "0.7.0" @@ -7128,16 +7316,19 @@ dependencies = [ [[package]] name = "rtnetlink" -version = "0.10.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" +checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" dependencies = [ "futures", "log", + "netlink-packet-core", "netlink-packet-route", + "netlink-packet-utils", "netlink-proto", - "nix 0.24.3", - "thiserror", + "netlink-sys", + "nix 0.26.4", + "thiserror 1.0.69", "tokio", ] @@ -7155,7 +7346,7 @@ dependencies = [ "fastrlp", "num-bigint", "num-traits", - "parity-scale-codec 3.6.12", + "parity-scale-codec 3.7.0", "primitive-types 0.12.2", "proptest", "rand", @@ -7267,9 +7458,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -7306,9 +7497,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" dependencies = [ "once_cell", "ring 0.17.8", @@ -7329,19 +7520,21 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] [[package]] name = "rustls-webpki" @@ -7366,9 +7559,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "rusty-fork" @@ -7423,33 +7616,33 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.3" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" +checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ "cfg-if", - "derive_more 0.99.18", - "parity-scale-codec 3.6.12", + "derive_more 1.0.0", + "parity-scale-codec 3.7.0", "scale-info-derive", ] [[package]] name = "scale-info-derive" -version = "2.11.3" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" +checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" 
dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] [[package]] name = "schannel" -version = "0.1.24" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -7540,9 +7733,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -7568,13 +7761,19 @@ dependencies = [ [[package]] name = "semver-parser" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" dependencies = [ "pest", ] +[[package]] +name = "send_wrapper" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" + [[package]] name = "send_wrapper" version = "0.6.0" @@ -7591,9 +7790,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.210" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -7610,20 +7809,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", "memchr", @@ -7649,14 +7848,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -7701,7 +7900,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -7843,7 +8042,7 @@ checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -8231,7 +8430,7 @@ 
dependencies = [ "tempfile", "types", "xdelta3", - "zstd 0.13.1", + "zstd 0.13.2", ] [[package]] @@ -8322,9 +8521,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.77" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ -8339,9 +8538,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" [[package]] name = "synstructure" @@ -8351,7 +8550,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -8377,7 +8576,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -8390,6 +8600,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "system_health" version = "0.1.0" @@ -8442,14 +8662,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.12.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", "fastrand", "once_cell", - "rustix 0.38.37", + "rustix 0.38.41", "windows-sys 0.59.0", ] @@ -8475,12 +8695,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" dependencies = [ - "rustix 0.38.37", - "windows-sys 0.48.0", + "rustix 0.38.41", + "windows-sys 0.59.0", ] [[package]] @@ -8518,22 +8738,42 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ - "thiserror-impl", + "proc-macro2", + "quote", + "syn 2.0.89", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -8641,7 +8881,7 @@ dependencies = [ "rand", "rustc-hash 1.1.0", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.69", "unicode-normalization", "wasm-bindgen", "zeroize", @@ -8656,6 +8896,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -8683,9 +8933,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", @@ -8716,7 +8966,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -8821,7 +9071,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.21", + "toml_edit 0.22.22", ] [[package]] @@ -8839,37 +9089,22 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.21" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.18", -] - -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tokio", - "tower-layer", - "tower-service", + "winnow 0.6.20", ] [[package]] @@ -8919,7 +9154,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", - "thiserror", + "thiserror 1.0.69", "time", "tracing-subscriber", ] @@ -8932,7 +9167,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -9023,7 +9258,7 @@ dependencies = [ "darling 0.20.10", 
"proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", ] [[package]] @@ -9038,9 +9273,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6631e42e10b40c0690bf92f404ebcfe6e1fdb480391d15f17cc8e96eeed5369" +checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85" dependencies = [ "serde", "stable_deref_trait", @@ -9113,9 +9348,9 @@ dependencies = [ [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "uint" @@ -9155,24 +9390,21 @@ checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] +checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-normalization" @@ -9185,9 +9417,9 @@ dependencies = [ [[package]] name = "unicode-properties" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" [[package]] name = "unicode-xid" @@ -9250,15 +9482,27 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna 1.0.3", "percent-encoding", ] +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -9290,7 +9534,7 @@ dependencies = [ "eth2", "fdlimit", "graffiti_file", - "hyper 1.4.1", + "hyper 1.5.1", "initialized_validators", "metrics", "monitoring_api", @@ -9336,6 +9580,8 @@ dependencies = [ "beacon_node_fallback", "bls", "deposit_contract", + "directory", + "dirs", "doppelganger_service", "eth2", "eth2_keystore", @@ -9368,6 +9614,7 @@ dependencies = [ "validator_store", "warp", "warp_utils", + "zeroize", ] [[package]] @@ 
-9411,6 +9658,7 @@ dependencies = [ "tree_hash", "types", "validator_http_api", + "zeroize", ] [[package]] @@ -9529,13 +9777,13 @@ dependencies = [ "futures-util", "headers", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "log", "mime", "mime_guess", "percent-encoding", "pin-project", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "scoped-tls", "serde", "serde_json", @@ -9580,9 +9828,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", "once_cell", @@ -9591,24 +9839,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ "cfg-if", "js-sys", @@ -9618,9 +9866,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9628,28 +9876,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "wasm-streams" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -9688,7 +9936,7 @@ dependencies = [ "env_logger 0.9.3", "eth2", "http_api", - "hyper 1.4.1", + "hyper 1.5.1", "log", "logging", "network", @@ -9709,9 +9957,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = 
"f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" dependencies = [ "js-sys", "wasm-bindgen", @@ -9770,7 +10018,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.37", + "rustix 0.38.41", ] [[package]] @@ -9779,7 +10027,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.5.4", + "redox_syscall 0.5.7", "wasite", "web-sys", ] @@ -9829,12 +10077,12 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.51.1" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" +checksum = "efc5cf48f83140dcaab716eeaea345f9e93d0018fb81162753a3f76c3397b538" dependencies = [ - "windows-core 0.51.1", - "windows-targets 0.48.5", + "windows-core 0.53.0", + "windows-targets 0.52.6", ] [[package]] @@ -9851,18 +10099,28 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.51.1" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] name = "windows-core" -version = "0.52.0" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +checksum = "9dcc5b895a6377f1ab9fa55acedab1fd5ac0db66ad1e6c7f47e28a22e446a5dd" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" dependencies = [ "windows-targets 0.52.6", ] @@ -10092,9 +10350,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -10109,6 +10367,18 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "ws_stream_wasm" version = "0.7.4" @@ -10121,8 +10391,8 @@ dependencies = [ "log", "pharos", "rustc_version 0.4.1", - "send_wrapper", - "thiserror", + "send_wrapper 0.6.0", + "thiserror 1.0.69", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -10168,7 +10438,7 @@ dependencies = [ "nom", "oid-registry", "rusticata-macros", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -10188,9 +10458,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" +checksum = 
"af310deaae937e48a26602b730250b4949e125f468f11e6990be3e5304ddd96f" [[package]] name = "xmltree" @@ -10229,9 +10499,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31b5e376a8b012bee9c423acdbb835fc34d45001cfa3106236a624e4b738028" +checksum = "17610762a1207ee816c6fadc29220904753648aba0a9ed61c7b8336e80a559c4" dependencies = [ "futures", "log", @@ -10252,6 +10522,30 @@ dependencies = [ "time", ] +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", + "synstructure", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -10270,7 +10564,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", + "synstructure", ] [[package]] @@ -10279,6 +10594,7 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ + "serde", "zeroize_derive", ] @@ -10290,7 +10606,29 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.89", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", ] [[package]] @@ -10324,11 +10662,11 @@ dependencies = [ [[package]] name = "zstd" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ - "zstd-safe 7.1.0", + "zstd-safe 7.2.1", ] [[package]] @@ -10343,9 +10681,9 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.1.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" dependencies = 
[ "zstd-sys", ] diff --git a/Cargo.toml b/Cargo.toml index 8cf4abb33eb..23e52a306b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,11 +8,11 @@ members = [ "beacon_node/builder_client", "beacon_node/client", "beacon_node/eth1", - "beacon_node/lighthouse_network", - "beacon_node/lighthouse_network/gossipsub", "beacon_node/execution_layer", "beacon_node/http_api", "beacon_node/http_metrics", + "beacon_node/lighthouse_network", + "beacon_node/lighthouse_network/gossipsub", "beacon_node/network", "beacon_node/store", "beacon_node/timer", @@ -30,40 +30,40 @@ members = [ "common/eth2_interop_keypairs", "common/eth2_network_config", "common/eth2_wallet_manager", - "common/metrics", "common/lighthouse_version", "common/lockfile", "common/logging", "common/lru_cache", "common/malloc_utils", + "common/metrics", + "common/monitoring_api", "common/oneshot_broadcast", "common/pretty_reqwest_error", "common/sensitive_url", "common/slot_clock", "common/system_health", - "common/task_executor", "common/target_check", + "common/task_executor", "common/test_random_derive", "common/unused_port", "common/validator_dir", "common/warp_utils", - "common/monitoring_api", - - "database_manager", - - "consensus/int_to_bytes", "consensus/fixed_bytes", "consensus/fork_choice", + + "consensus/int_to_bytes", "consensus/proto_array", "consensus/safe_arith", "consensus/state_processing", "consensus/swap_or_not_shuffle", "crypto/bls", - "crypto/kzg", "crypto/eth2_key_derivation", "crypto/eth2_keystore", "crypto/eth2_wallet", + "crypto/kzg", + + "database_manager", "lcli", @@ -78,8 +78,8 @@ members = [ "testing/execution_engine_integration", "testing/node_test_rig", "testing/simulator", - "testing/test-test_logger", "testing/state_transition_vectors", + "testing/test-test_logger", "testing/web3signer_tests", "validator_client", @@ -122,14 +122,20 @@ clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } c-kzg = { version = "1", default-features = false } compare_fields_derive = { path = "common/compare_fields_derive" } criterion = "0.5" -delay_map = "0.3" +delay_map = "0.4" derivative = "2" dirs = "3" either = "1.9" -rust_eth_kzg = "0.5.1" +# TODO: rust_eth_kzg is pinned for now while a perf regression is investigated +# The crate_crypto_* dependencies can be removed from this file completely once we update +rust_eth_kzg = "=0.5.1" +crate_crypto_internal_eth_kzg_bls12_381 = "=0.5.1" +crate_crypto_internal_eth_kzg_erasure_codes = "=0.5.1" +crate_crypto_internal_eth_kzg_maybe_rayon = "=0.5.1" +crate_crypto_internal_eth_kzg_polynomial = "=0.5.1" +crate_crypto_kzg_multi_open_fk20 = "=0.5.1" discv5 = { version = "0.9", features = ["libp2p"] } env_logger = "0.9" -error-chain = "0.12" ethereum_hashing = "0.7.0" ethereum_serde_utils = "0.7" ethereum_ssz = "0.7" @@ -161,7 +167,13 @@ r2d2 = "0.8" rand = "0.8" rayon = "1.7" regex = "1" -reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } +reqwest = { version = "0.11", default-features = false, features = [ + "blocking", + "json", + "stream", + "rustls-tls", + "native-tls-vendored", +] } ring = "0.16" rpds = "0.11" rusqlite = { version = "0.28", features = ["bundled"] } @@ -170,7 +182,11 @@ serde_json = "1" serde_repr = "0.1" serde_yaml = "0.9" sha2 = "0.9" -slog = { version = "2", features = ["max_level_debug", "release_max_level_debug", "nested-values"] } +slog = { version = "2", features = [ + "max_level_debug", + "release_max_level_debug", + "nested-values", +] } slog-async = "2" 
slog-term = "2" sloggers = { version = "2", features = ["json"] } @@ -182,7 +198,12 @@ superstruct = "0.8" syn = "1" sysinfo = "0.26" tempfile = "3" -tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal", "macros"] } +tokio = { version = "1", features = [ + "rt-multi-thread", + "sync", + "signal", + "macros", +] } tokio-stream = { version = "0.1", features = ["sync"] } tokio-util = { version = "0.7", features = ["codec", "compat", "time"] } tracing = "0.1.40" @@ -195,7 +216,7 @@ tree_hash_derive = "0.8" url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } warp = { version = "0.3.7", default-features = false, features = ["tls"] } -zeroize = { version = "1", features = ["zeroize_derive"] } +zeroize = { version = "1", features = ["zeroize_derive", "serde"] } zip = "0.6" # Local crates. @@ -261,7 +282,7 @@ validator_dir = { path = "common/validator_dir" } validator_http_api = { path = "validator_client/http_api" } validator_http_metrics = { path = "validator_client/http_metrics" } validator_metrics = { path = "validator_client/validator_metrics" } -validator_store= { path = "validator_client/validator_store" } +validator_store = { path = "validator_client/validator_store" } warp_utils = { path = "common/warp_utils" } xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "50d63cdf1878e5cf3538e9aae5eed34a22c64e4a" } zstd = "0.13" diff --git a/FUNDING.json b/FUNDING.json index 5001999927c..3164f351be0 100644 --- a/FUNDING.json +++ b/FUNDING.json @@ -2,6 +2,13 @@ "drips": { "ethereum": { "ownedBy": "0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b" + }, + "filecoin": { + "ownedBy": "0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b" } + }, + "opRetro": { + "projectId": "0x04b1cd5a7c59117474ce414b309fa48e985bdaab4b0dab72045f74d04ebd8cff" } -} \ No newline at end of file +} + diff --git a/Makefile b/Makefile index fd7d45f26a0..8faf8a2e54b 100644 --- a/Makefile +++ b/Makefile @@ -220,6 +220,10 @@ lint: lint-fix: EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint +# Also run the lints on the optimized-only tests +lint-full: + RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" $(MAKE) lint + # Runs the makefile in the `ef_tests` repo. # # May download and extract an archive of test vectors from the ethereum @@ -240,7 +244,7 @@ install-audit: cargo install --force cargo-audit audit-CI: - cargo audit + cargo audit --ignore RUSTSEC-2024-0421 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: diff --git a/README.md b/README.md index 4b22087bcdc..147a06e5040 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ Lighthouse is: - Built in [Rust](https://www.rust-lang.org), a modern language providing unique safety guarantees and excellent performance (comparable to C++). - Funded by various organisations, including Sigma Prime, the - Ethereum Foundation, ConsenSys, the Decentralization Foundation and private individuals. + Ethereum Foundation, Consensys, the Decentralization Foundation and private individuals. - Actively involved in the specification and security analysis of the Ethereum proof-of-stake consensus specification. 
diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 7f2fa05a888..a7752d621ff 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -8,25 +8,26 @@ authors = [ edition = { workspace = true } [dependencies] +account_utils = { workspace = true } bls = { workspace = true } clap = { workspace = true } -types = { workspace = true } -environment = { workspace = true } -eth2_network_config = { workspace = true } clap_utils = { workspace = true } directory = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } +eth2_keystore = { workspace = true } +eth2_network_config = { workspace = true } eth2_wallet = { workspace = true } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } -validator_dir = { workspace = true } -tokio = { workspace = true } -eth2_keystore = { workspace = true } -account_utils = { workspace = true } -slashing_protection = { workspace = true } -eth2 = { workspace = true } -safe_arith = { workspace = true } -slot_clock = { workspace = true } filesystem = { workspace = true } +safe_arith = { workspace = true } sensitive_url = { workspace = true } +slashing_protection = { workspace = true } +slot_clock = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +validator_dir = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index ec5af1e2ece..73e0ad54d47 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -294,7 +294,7 @@ pub fn read_wallet_password_from_cli( eprintln!(); eprintln!("{}", WALLET_PASSWORD_PROMPT); let password = - PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec()); + PlainText::from(read_password_from_user(stdin_inputs)?.as_bytes().to_vec()); Ok(password) } } diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 3fb0e50d225..ea1a24da1ff 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -409,6 +409,6 @@ mod tests { ) .unwrap(); - assert_eq!(expected_pk, kp.pk.into()); + assert_eq!(expected_pk, kp.pk); } } diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 19ab5ad60ac..4d2353b5534 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -7,7 +7,7 @@ use account_utils::{ recursively_find_voting_keystores, PasswordStorage, ValidatorDefinition, ValidatorDefinitions, CONFIG_FILENAME, }, - ZeroizeString, STDIN_INPUTS_FLAG, + STDIN_INPUTS_FLAG, }; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; @@ -16,6 +16,7 @@ use std::fs; use std::path::PathBuf; use std::thread::sleep; use std::time::Duration; +use zeroize::Zeroizing; pub const CMD: &str = "import"; pub const KEYSTORE_FLAG: &str = "keystore"; @@ -148,7 +149,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin // Skip keystores that already exist, but exit early if any operation fails. // Reuses the same password for all keystores if the `REUSE_PASSWORD_FLAG` flag is set. 
let mut num_imported_keystores = 0; - let mut previous_password: Option<ZeroizeString> = None; + let mut previous_password: Option<Zeroizing<String>> = None; for src_keystore in &keystore_paths { let keystore = Keystore::from_json_file(src_keystore) @@ -182,14 +183,17 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let password = match keystore_password_path.as_ref() { Some(path) => { - let password_from_file: ZeroizeString = fs::read_to_string(path) + let password_from_file: Zeroizing<String> = fs::read_to_string(path) .map_err(|e| format!("Unable to read {:?}: {:?}", path, e))? .into(); - password_from_file.without_newlines() + password_from_file + .trim_end_matches(['\r', '\n']) + .to_string() + .into() } None => { let password_from_user = read_password_from_user(stdin_inputs)?; - if password_from_user.as_ref().is_empty() { + if password_from_user.is_empty() { eprintln!("Continuing without password."); sleep(Duration::from_secs(1)); // Provides nicer UX. break None; @@ -314,7 +318,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin /// Otherwise, returns the keystore error. fn check_password_on_keystore( keystore: &Keystore, - password: &ZeroizeString, + password: &Zeroizing<String>, ) -> Result<bool, String> { match keystore.decrypt_keypair(password.as_ref()) { Ok(_) => {
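The import.rs hunk above drops `ZeroizeString::without_newlines` in favour of plain `Zeroizing<String>` plus an explicit trim. A standalone sketch of the same read-and-trim flow (the function name is hypothetical; only the types and the trim idiom come from the diff):

```rust
use std::fs;
use zeroize::Zeroizing;

// Read a keystore password file, stripping any trailing CR/LF an editor
// may have appended. Both the raw and the trimmed strings are zeroized
// when dropped.
fn read_password_file(path: &str) -> Result<Zeroizing<String>, String> {
    let raw: Zeroizing<String> = fs::read_to_string(path)
        .map_err(|e| format!("Unable to read {path}: {e}"))?
        .into();
    // `trim_end_matches` only borrows, so re-own the trimmed slice into a
    // fresh Zeroizing<String>, mirroring the hunk above.
    Ok(raw.trim_end_matches(['\r', '\n']).to_string().into())
}

fn main() {
    // Usage sketch: assumes a "password.txt" file exists.
    if let Ok(pw) = read_password_file("password.txt") {
        println!("read {} bytes", pw.len());
    }
}
```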
diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index b22007050fd..6369646929a 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -226,14 +226,14 @@ pub fn read_new_wallet_password_from_cli( eprintln!(); eprintln!("{}", NEW_WALLET_PASSWORD_PROMPT); let password = - PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec()); + PlainText::from(read_password_from_user(stdin_inputs)?.as_bytes().to_vec()); // Ensure the password meets the minimum requirements. match is_password_sufficiently_complex(password.as_bytes()) { Ok(_) => { eprintln!("{}", RETYPE_PASSWORD_PROMPT); let retyped_password = - PlainText::from(read_password_from_user(stdin_inputs)?.as_ref().to_vec()); + PlainText::from(read_password_from_user(stdin_inputs)?.as_bytes().to_vec()); if retyped_password == password { break Ok(password); } else { diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index bb946e3c5a2..7da65ad7426 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "5.3.0" +version = "6.0.1" authors = [ "Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>", ] diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs @@ ... @@ pub struct VerifiedAggregatedAttestation<'a, T: BeaconChainTypes> { indexed_attestation: IndexedAttestation<T::EthSpec>, } -impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { +impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<'_, T> { pub fn into_indexed_attestation(self) -> IndexedAttestation<T::EthSpec> { self.indexed_attestation } @@ -319,7 +319,7 @@ pub struct VerifiedUnaggregatedAttestation<'a, T: BeaconChainTypes> { subnet_id: SubnetId, } -impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { +impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'_, T> { pub fn into_indexed_attestation(self) -> IndexedAttestation<T::EthSpec> { self.indexed_attestation } @@ -327,7 +327,7 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { /// Custom `Clone` implementation is to avoid the restrictive trait bounds applied by the usual derive /// macro. -impl<'a, T: BeaconChainTypes> Clone for IndexedUnaggregatedAttestation<'a, T> { +impl<T: BeaconChainTypes> Clone for IndexedUnaggregatedAttestation<'_, T> { fn clone(&self) -> Self { Self { attestation: self.attestation, @@ -353,7 +353,7 @@ pub trait VerifiedAttestation<T: BeaconChainTypes>: Sized { } } -impl<'a, T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedAggregatedAttestation<'a, T> { +impl<T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedAggregatedAttestation<'_, T> { fn attestation(&self) -> AttestationRef<T::EthSpec> { self.attestation() } @@ -363,7 +363,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedAggregatedAttes } } -impl<'a, T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedUnaggregatedAttestation<'a, T> { +impl<T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedUnaggregatedAttestation<'_, T> { fn attestation(&self) -> AttestationRef<T::EthSpec> { self.attestation } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a78ae266e5a..80766d57b33 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1112,6 +1112,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// ## Errors /// /// May return a database error. + #[allow(clippy::type_complexity)] pub fn get_blocks_checking_caches( self: &Arc<Self>, block_roots: Vec<Hash256>, @@ -1127,6 +1128,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { Ok(BeaconBlockStreamer::<T>::new(self, CheckCaches::Yes)?.launch_stream(block_roots)) } + #[allow(clippy::type_complexity)] pub fn get_blocks( self: &Arc<Self>, block_roots: Vec<Hash256>, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 3ae19430aad..ddb7bb614a3 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -92,6 +92,7 @@ use std::fs; use std::io::Write; use std::sync::Arc; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; +use strum::AsRefStr; use task_executor::JoinHandle; use types::{ data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, @@ -137,7 +138,7 @@ const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files"); /// /// - The block is malformed/invalid (indicated by all results other than `BeaconChainError`). /// - We encountered an error whilst trying to verify the block (a `BeaconChainError`). -#[derive(Debug)] +#[derive(Debug, AsRefStr)] pub enum BlockError { /// The parent block was unknown. /// @@ -2072,6 +2073,7 @@ pub fn get_validator_pubkey_cache( /// /// The signature verifier is empty because it does not yet have any of this block's signatures /// added to it. Use `Self::apply_to_signature_verifier` to apply the signatures. +#[allow(clippy::type_complexity)] fn get_signature_verifier<'a, T: BeaconChainTypes>( state: &'a BeaconState<T::EthSpec>, validator_pubkey_cache: &'a ValidatorPubkeyCache<T>,
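`BlockError` now derives `strum::AsRefStr` in the block_verification.rs hunk above, so `as_ref()` yields the variant name as a `&'static str` — handy for metrics labels or log fields without going through `Debug`. A minimal illustration with a hypothetical enum, assuming `strum` with its `derive` feature:

```rust
use strum::AsRefStr;

// AsRefStr generates `impl AsRef<str>` returning each variant's name.
#[derive(Debug, AsRefStr)]
enum DemoBlockError {
    ParentUnknown,
    WouldRevertFinalizedSlot,
}

fn main() {
    let err = DemoBlockError::ParentUnknown;
    // Just the variant name: no allocation, no payload formatting.
    assert_eq!(err.as_ref(), "ParentUnknown");
}
```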
diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 589db0af501..9d99ff9d8e0 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1037,7 +1037,9 @@ where ); // Check for states to reconstruct (in the background). - if beacon_chain.config.reconstruct_historic_states { + if beacon_chain.config.reconstruct_historic_states + && beacon_chain.store.get_oldest_block_slot() == 0 + { beacon_chain.store_migrator.process_reconstruction(); } diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 276262085eb..cb6e4c34f3e 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -107,8 +107,7 @@ fn get_sync_status( // Determine how many voting periods are contained in distance between // now and genesis, rounding up. - let voting_periods_past = - (seconds_till_genesis + voting_period_duration - 1) / voting_period_duration; + let voting_periods_past = seconds_till_genesis.div_ceil(voting_period_duration); // Return the start time of the current voting period*. //
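The eth1_chain.rs change above replaces the hand-rolled round-up idiom with `u64::div_ceil`, stable since Rust 1.73. The two forms agree wherever the old one does not overflow:

```rust
fn main() {
    let seconds_till_genesis: u64 = 100;
    let voting_period_duration: u64 = 32;

    // Old idiom: add (divisor - 1) before dividing; can overflow near u64::MAX.
    let old = (seconds_till_genesis + voting_period_duration - 1) / voting_period_duration;
    // New form: same ceiling division without the risky addition.
    let new = seconds_till_genesis.div_ceil(voting_period_duration);

    assert_eq!(old, new);
    assert_eq!(new, 4); // ceil(100 / 32) = 4
}
```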
diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index f2420eea0d2..92d24c53c00 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -7,14 +7,13 @@ //! So, this module contains functions that one might expect to find in other crates, but they live //! here for good reason. -use crate::otb_verification_service::OptimisticTransitionBlock; use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; use execution_layer::{ BlockProposalContents, BlockProposalContentsType, BuilderParams, NewPayloadRequest, - PayloadAttributes, PayloadStatus, + PayloadAttributes, PayloadParameters, PayloadStatus, }; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; @@ -284,9 +283,6 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( "block_hash" => ?execution_payload.parent_hash(), "msg" => "the terminal block/parent was unavailable" ); - // Store Optimistic Transition Block in Database for later Verification - OptimisticTransitionBlock::from_block(block) - .persist_in_store::<T, _>(&chain.store)?; Ok(()) } else { Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()) @@ -375,8 +371,9 @@ pub fn get_execution_payload( let timestamp = compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; - let latest_execution_payload_header_block_hash = - state.latest_execution_payload_header()?.block_hash(); + let latest_execution_payload_header = state.latest_execution_payload_header()?; + let latest_execution_payload_header_block_hash = latest_execution_payload_header.block_hash(); + let latest_execution_payload_header_gas_limit = latest_execution_payload_header.gas_limit(); let withdrawals = match state { &BeaconState::Capella(_) | &BeaconState::Deneb(_) | &BeaconState::Electra(_) => { Some(get_expected_withdrawals(state, spec)?.0.into()) @@ -406,6 +403,7 @@ pub fn get_execution_payload( random, proposer_index, latest_execution_payload_header_block_hash, + latest_execution_payload_header_gas_limit, builder_params, withdrawals, parent_beacon_block_root, @@ -443,6 +441,7 @@ pub async fn prepare_execution_payload( random: Hash256, proposer_index: u64, latest_execution_payload_header_block_hash: ExecutionBlockHash, + latest_execution_payload_header_gas_limit: u64, builder_params: BuilderParams, withdrawals: Option<Vec<Withdrawal>>, parent_beacon_block_root: Option<Hash256>, @@ -526,13 +525,20 @@ where parent_beacon_block_root, ); + let target_gas_limit = execution_layer.get_proposer_gas_limit(proposer_index).await; + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit: latest_execution_payload_header_gas_limit, + proposer_gas_limit: target_gas_limit, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: fork, + }; + let block_contents = execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - fork, &chain.spec, builder_boost_factor, block_production_version,
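The hunk above folds five positional `get_payload` arguments into a `PayloadParameters` struct. A generic sketch of that parameter-object refactor, with simplified stand-in types — this is not the real `execution_layer` API, and the gas-limit selection shown is only one plausible use of the two new fields:

```rust
// Named fields make each call site self-documenting and prevent two
// same-typed arguments (here the gas limits) from being swapped silently.
struct PayloadParameters {
    parent_gas_limit: u64,
    proposer_gas_limit: Option<u64>,
}

fn effective_gas_limit(params: &PayloadParameters) -> u64 {
    // Prefer a proposer-registered target, fall back to the parent's limit.
    params.proposer_gas_limit.unwrap_or(params.parent_gas_limit)
}

fn main() {
    let params = PayloadParameters {
        parent_gas_limit: 30_000_000,
        proposer_gas_limit: Some(36_000_000),
    };
    assert_eq!(effective_gas_limit(&params), 36_000_000);
}
```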
diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 1680c0298d1..bd47e82215e 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -7,8 +7,9 @@ use std::sync::Arc; use types::beacon_block_body::KzgCommitments; use types::data_column_sidecar::{Cell, DataColumn, DataColumnSidecarError}; use types::{ - Blob, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, Hash256, - KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, + Blob, BlobSidecar, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecar, + DataColumnSidecarList, EthSpec, Hash256, KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, + SignedBeaconBlockHeader, SignedBlindedBeaconBlock, }; /// Converts a blob ssz List object to an array to be used with the kzg @@ -243,6 +244,83 @@ fn build_data_column_sidecars( Ok(sidecars) } +/// Reconstruct blobs from a subset of data column sidecars (requires at least 50%). +/// +/// If `blob_indices_opt` is `None`, this function attempts to reconstruct all blobs associated +/// with the block. +pub fn reconstruct_blobs<E: EthSpec>( + kzg: &Kzg, + data_columns: &[Arc<DataColumnSidecar<E>>], + blob_indices_opt: Option<Vec<u64>>, + signed_block: &SignedBlindedBeaconBlock<E>, +) -> Result<BlobSidecarList<E>, String> { + // The data columns are from the database, so we assume their correctness. + let first_data_column = data_columns + .first() + .ok_or("data_columns should have at least one element".to_string())?; + + let blob_indices: Vec<usize> = match blob_indices_opt { + Some(indices) => indices.into_iter().map(|i| i as usize).collect(), + None => { + let num_of_blobs = first_data_column.kzg_commitments.len(); + (0..num_of_blobs).collect() + } + }; + + let blob_sidecars = blob_indices + .into_par_iter() + .map(|row_index| { + let mut cells: Vec = vec![]; + let mut cell_ids: Vec<u64> = vec![]; + for data_column in data_columns { + let cell = data_column + .column + .get(row_index) + .ok_or(format!("Missing data column at row index {row_index}")) + .and_then(|cell| { + ssz_cell_to_crypto_cell::<E>(cell).map_err(|e| format!("{e:?}")) + })?; + + cells.push(cell); + cell_ids.push(data_column.index); + } + + let (cells, _kzg_proofs) = kzg + .recover_cells_and_compute_kzg_proofs(&cell_ids, &cells) + .map_err(|e| format!("Failed to recover cells and compute KZG proofs: {e:?}"))?; + + let num_cells_original_blob = cells.len() / 2; + let blob_bytes = cells + .into_iter() + .take(num_cells_original_blob) + .flat_map(|cell| cell.into_iter()) + .collect(); + + let blob = Blob::<E>::new(blob_bytes).map_err(|e| format!("{e:?}"))?; + let kzg_commitment = first_data_column + .kzg_commitments + .get(row_index) + .ok_or(format!("Missing KZG commitment for blob {row_index}"))?; + let kzg_proof = compute_blob_kzg_proof::<E>(kzg, &blob, *kzg_commitment) + .map_err(|e| format!("{e:?}"))?; + + BlobSidecar::<E>::new_with_existing_proof( + row_index, + blob, + signed_block, + first_data_column.signed_block_header.clone(), + &first_data_column.kzg_commitments_inclusion_proof, + kzg_proof, + ) + .map(Arc::new) + .map_err(|e| format!("{e:?}")) + }) + .collect::<Result<Vec<_>, _>>()? + .into(); + + Ok(blob_sidecars) +}
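Based only on the signature above, a hedged usage sketch of `reconstruct_blobs`: any subset of at least half the column sidecars suffices (64 of the 128 columns under mainnet PeerDAS parameters), and passing `None` recovers every blob. The wrapper below is hypothetical and would live inside the `beacon_chain` crate:

```rust
use std::sync::Arc;
use types::{BlobSidecarList, DataColumnSidecar, EthSpec, SignedBlindedBeaconBlock};

// Hypothetical helper: recover blobs 3..=5 from the first half of the
// stored column sidecars, much as the new test below does.
fn recover_blobs_3_to_5<E: EthSpec>(
    kzg: &kzg::Kzg,
    columns: &[Arc<DataColumnSidecar<E>>],
    block: &SignedBlindedBeaconBlock<E>,
) -> Result<BlobSidecarList<E>, String> {
    // >= 50% of the columns must be present or reconstruction fails.
    let half = &columns[..columns.len() / 2];
    crate::kzg_utils::reconstruct_blobs(kzg, half, Some(vec![3, 4, 5]), block)
}
```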
+ /// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%). pub fn reconstruct_data_columns( kzg: &Kzg, @@ -265,7 +343,7 @@ pub fn reconstruct_data_columns( for data_column in data_columns { let cell = data_column.column.get(row_index).ok_or( KzgError::InconsistentArrayLength(format!( - "Missing data column at index {row_index}" + "Missing data column at row index {row_index}" )), )?; @@ -289,12 +367,16 @@ pub fn reconstruct_data_columns( #[cfg(test)] mod test { - use crate::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; + use crate::kzg_utils::{ + blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns, + }; use bls::Signature; + use eth2::types::BlobsBundle; + use execution_layer::test_utils::generate_blobs; use kzg::{trusted_setup::get_trusted_setup, Kzg, KzgCommitment, TrustedSetup}; use types::{ - beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, - ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, + beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, BlobsList, ChainSpec, + EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, }; type E = MainnetEthSpec; @@ -308,6 +390,7 @@ mod test { test_build_data_columns_empty(&kzg, &spec); test_build_data_columns(&kzg, &spec); test_reconstruct_data_columns(&kzg, &spec); + test_reconstruct_blobs_from_data_columns(&kzg, &spec); } #[track_caller] @@ -379,6 +462,36 @@ mod test { } } + #[track_caller] + fn test_reconstruct_blobs_from_data_columns(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 6; + let (signed_block, blobs) = create_test_block_and_blobs::<E>(num_of_blobs, spec); + let blob_refs = blobs.iter().collect::<Vec<_>>(); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + + // Now reconstruct + let signed_blinded_block = signed_block.into(); + let blob_indices = vec![3, 4, 5]; + let reconstructed_blobs = reconstruct_blobs( + kzg, + &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], + Some(blob_indices.clone()), + &signed_blinded_block, + ) + .unwrap(); + + for i in blob_indices { + let reconstructed_blob = &reconstructed_blobs + .iter() + .find(|sidecar| sidecar.index == i) + .map(|sidecar| sidecar.blob.clone()) + .expect("reconstructed blob should exist"); + let original_blob = blobs.get(i as usize).unwrap(); + assert_eq!(reconstructed_blob, original_blob, "{i}"); + } + } + fn get_kzg() -> Kzg { let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) @@ -397,12 +510,20 @@ mod test { KzgCommitments::<E>::new(vec![KzgCommitment::empty_for_testing(); num_of_blobs]) .unwrap(); - let signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + let mut signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + + let (blobs_bundle, _) = generate_blobs::<E>(num_of_blobs).unwrap(); + let BlobsBundle { blobs, commitments, proofs: _, } = blobs_bundle; - let blobs = (0..num_of_blobs) .map(|_| Blob::<E>::default()) .collect::<Vec<_>>() .into(); + *signed_block .message_mut() .body_mut() .blob_kzg_commitments_mut() .unwrap() = commitments; (signed_block, blobs) } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 2953516fb1a..d9728b9fd41 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -47,7 +47,6 @@ pub mod observed_block_producers; pub mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable;
-pub mod otb_verification_service; mod persisted_beacon_chain; mod persisted_fork_choice; mod pre_finalization_cache; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index c6aa9fbcac7..ae3add7f032 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1656,7 +1656,7 @@ pub static BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: LazyLock<Result<Histogram>> }); pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock<Result<HistogramVec>> = LazyLock::new(|| { try_create_histogram_vec_with_buckets( - "data_column_sidecar_computation_seconds", + "beacon_data_column_sidecar_computation_seconds", "Time taken to compute data column sidecar, including cells, proofs and inclusion proof", Ok(vec![0.1, 0.15, 0.25, 0.35, 0.5, 0.7, 1.0, 2.5, 5.0, 10.0]), &["blob_count"], @@ -1665,7 +1665,7 @@ pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock<Result<HistogramVec>> = Laz pub static DATA_COLUMN_SIDECAR_INCLUSION_PROOF_VERIFICATION: LazyLock<Result<Histogram>> = LazyLock::new(|| { try_create_histogram( - "data_column_sidecar_inclusion_proof_verification_seconds", + "beacon_data_column_sidecar_inclusion_proof_verification_seconds", "Time taken to verify data_column sidecar inclusion proof", ) }); @@ -1693,7 +1693,7 @@ pub static DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock<Result<Histogram>> pub static DATA_COLUMN_SIDECAR_PROCESSING_SUCCESSES: LazyLock<Result<IntCounter>> = LazyLock::new(|| { try_create_int_counter( - "beacon_blobs_column_sidecar_processing_successes_total", + "beacon_data_column_sidecar_processing_successes_total", "Number of data column sidecars verified for gossip", ) }); @@ -1847,7 +1847,7 @@ pub static KZG_VERIFICATION_BATCH_TIMES: LazyLock<Result<Histogram>> = LazyLock: pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock<Result<Histogram>> = LazyLock::new(|| { try_create_histogram_with_buckets( - "kzg_verification_data_column_single_seconds", + "beacon_kzg_verification_data_column_single_seconds", "Runtime of single data column kzg verification", Ok(vec![ 0.0005, 0.001, 0.0015, 0.002, 0.003, 0.004, 0.005, 0.007, 0.01, 0.02, 0.05, @@ -1857,7 +1857,7 @@ pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock<Result<Histogram>> pub static KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES: LazyLock<Result<Histogram>> = LazyLock::new(|| { try_create_histogram_with_buckets( - "kzg_verification_data_column_batch_seconds", + "beacon_kzg_verification_data_column_batch_seconds", "Runtime of batched data column kzg verification", Ok(vec![ 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.015, 0.02, 0.03, 0.05, 0.07, @@ -1910,14 +1910,14 @@ pub static DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE: LazyLock<Result<IntGauge>> pub static DATA_AVAILABILITY_RECONSTRUCTION_TIME: LazyLock<Result<Histogram>> = LazyLock::new(|| { try_create_histogram( - "data_availability_reconstruction_time_seconds", + "beacon_data_availability_reconstruction_time_seconds", "Time taken to reconstruct columns", ) }); pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock<Result<IntCounter>> = LazyLock::new(|| { try_create_int_counter( - "data_availability_reconstructed_columns_total", + "beacon_data_availability_reconstructed_columns_total", "Total count of reconstructed columns", ) }); @@ -1925,7 +1925,7 @@ pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock<Result<IntCounter>> pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock<Result<IntCounter>> = LazyLock::new(|| { try_create_int_counter( - "kzg_data_column_reconstruction_attempts", + "beacon_kzg_data_column_reconstruction_attempts", "Count of times data column reconstruction has been attempted", ) }); @@ -1933,7 +1933,7 @@ pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock<Result<IntCounter>> pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock<Result<IntCounter>> = LazyLock::new(|| { try_create_int_counter( - "kzg_data_column_reconstruction_failures",
"beacon_kzg_data_column_reconstruction_failures", "Count of times data column reconstruction has failed", ) }); @@ -1941,7 +1941,7 @@ pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock> pub static KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter_vec( - "kzg_data_column_reconstruction_incomplete_total", + "beacon_kzg_data_column_reconstruction_incomplete_total", "Count of times data column reconstruction attempts did not result in an import", &["reason"], ) diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 37a2e8917ba..bc4b8e1ed86 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -26,8 +26,10 @@ const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200; const COMPACTION_FINALITY_DISTANCE: u64 = 1024; /// Maximum number of blocks applied in each reconstruction burst. /// -/// This limits the amount of time that the finalization migration is paused for. -const BLOCKS_PER_RECONSTRUCTION: usize = 8192 * 4; +/// This limits the amount of time that the finalization migration is paused for. We set this +/// conservatively because pausing the finalization migration for too long can cause hot state +/// cache misses and excessive disk use. +const BLOCKS_PER_RECONSTRUCTION: usize = 1024; /// Default number of epochs to wait between finalization migrations. pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1; diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index 038edfe27f0..dec012fb929 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -113,7 +113,7 @@ pub trait SubsetItem { fn root(&self) -> Result; } -impl<'a, E: EthSpec> SubsetItem for AttestationRef<'a, E> { +impl SubsetItem for AttestationRef<'_, E> { type Item = BitList; fn is_subset(&self, other: &Self::Item) -> bool { match self { @@ -159,7 +159,7 @@ impl<'a, E: EthSpec> SubsetItem for AttestationRef<'a, E> { } } -impl<'a, E: EthSpec> SubsetItem for &'a SyncCommitteeContribution { +impl SubsetItem for &SyncCommitteeContribution { type Item = BitVector; fn is_subset(&self, other: &Self::Item) -> bool { self.aggregation_bits.is_subset(other) diff --git a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs deleted file mode 100644 index 31034a7d59b..00000000000 --- a/beacon_node/beacon_chain/src/otb_verification_service.rs +++ /dev/null @@ -1,381 +0,0 @@ -use crate::execution_payload::{validate_merge_block, AllowOptimisticImport}; -use crate::{ - BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, -}; -use itertools::process_results; -use proto_array::InvalidationOperation; -use slog::{crit, debug, error, info, warn}; -use slot_clock::SlotClock; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_block_processing::is_merge_transition_complete; -use std::sync::Arc; -use store::{DBColumn, Error as StoreError, HotColdDB, KeyValueStore, StoreItem}; -use task_executor::{ShutdownReason, TaskExecutor}; -use tokio::time::sleep; -use tree_hash::TreeHash; -use types::{BeaconBlockRef, EthSpec, Hash256, Slot}; -use DBColumn::OptimisticTransitionBlock as OTBColumn; - -#[derive(Clone, Debug, Decode, Encode, PartialEq)] -pub struct OptimisticTransitionBlock { - root: Hash256, 
- slot: Slot, -} - -impl OptimisticTransitionBlock { - // types::BeaconBlockRef<'_, ::EthSpec> - pub fn from_block(block: BeaconBlockRef) -> Self { - Self { - root: block.tree_hash_root(), - slot: block.slot(), - } - } - - pub fn root(&self) -> &Hash256 { - &self.root - } - - pub fn slot(&self) -> &Slot { - &self.slot - } - - pub fn persist_in_store(&self, store: A) -> Result<(), StoreError> - where - T: BeaconChainTypes, - A: AsRef>, - { - if store - .as_ref() - .item_exists::(&self.root)? - { - Ok(()) - } else { - store.as_ref().put_item(&self.root, self) - } - } - - pub fn remove_from_store(&self, store: A) -> Result<(), StoreError> - where - T: BeaconChainTypes, - A: AsRef>, - { - store - .as_ref() - .hot_db - .key_delete(OTBColumn.into(), self.root.as_slice()) - } - - fn is_canonical( - &self, - chain: &BeaconChain, - ) -> Result { - Ok(chain - .forwards_iter_block_roots_until(self.slot, self.slot)? - .next() - .transpose()? - .map(|(root, _)| root) - == Some(self.root)) - } -} - -impl StoreItem for OptimisticTransitionBlock { - fn db_column() -> DBColumn { - OTBColumn - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) - } -} - -/// The routine is expected to run once per epoch, 1/4th through the epoch. -pub const EPOCH_DELAY_FACTOR: u32 = 4; - -/// Spawns a routine which checks the validity of any optimistically imported transition blocks -/// -/// This routine will run once per epoch, at `epoch_duration / EPOCH_DELAY_FACTOR` after -/// the start of each epoch. -/// -/// The service will not be started if there is no `execution_layer` on the `chain`. -pub fn start_otb_verification_service( - executor: TaskExecutor, - chain: Arc>, -) { - // Avoid spawning the service if there's no EL, it'll just error anyway. - if chain.execution_layer.is_some() { - executor.spawn( - async move { otb_verification_service(chain).await }, - "otb_verification_service", - ); - } -} - -pub fn load_optimistic_transition_blocks( - chain: &BeaconChain, -) -> Result, StoreError> { - process_results( - chain.store.hot_db.iter_column::(OTBColumn), - |iter| { - iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) - .collect() - }, - )? -} - -#[derive(Debug)] -pub enum Error { - ForkChoice(String), - BeaconChain(BeaconChainError), - StoreError(StoreError), - NoBlockFound(OptimisticTransitionBlock), -} - -pub async fn validate_optimistic_transition_blocks( - chain: &Arc>, - otbs: Vec, -) -> Result<(), Error> { - let finalized_slot = chain - .canonical_head - .fork_choice_read_lock() - .get_finalized_block() - .map_err(|e| Error::ForkChoice(format!("{:?}", e)))? 
- .slot; - - // separate otbs into - // non-canonical - // finalized canonical - // unfinalized canonical - let mut non_canonical_otbs = vec![]; - let (finalized_canonical_otbs, unfinalized_canonical_otbs) = process_results( - otbs.into_iter().map(|otb| { - otb.is_canonical(chain) - .map(|is_canonical| (otb, is_canonical)) - }), - |pair_iter| { - pair_iter - .filter_map(|(otb, is_canonical)| { - if is_canonical { - Some(otb) - } else { - non_canonical_otbs.push(otb); - None - } - }) - .partition::, _>(|otb| *otb.slot() <= finalized_slot) - }, - ) - .map_err(Error::BeaconChain)?; - - // remove non-canonical blocks that conflict with finalized checkpoint from the database - for otb in non_canonical_otbs { - if *otb.slot() <= finalized_slot { - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - } - } - - // ensure finalized canonical otb are valid, otherwise kill client - for otb in finalized_canonical_otbs { - match chain.get_block(otb.root()).await { - Ok(Some(block)) => { - match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await - { - Ok(()) => { - // merge transition block is valid, remove it from OTB - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - info!( - chain.log, - "Validated merge transition block"; - "block_root" => ?otb.root(), - "type" => "finalized" - ); - } - // The block was not able to be verified by the EL. Leave the OTB in the - // database since the EL is likely still syncing and may verify the block - // later. - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::UnverifiedNonOptimisticCandidate, - )) => (), - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, - )) => { - // Finalized Merge Transition Block is Invalid! Kill the Client! - crit!( - chain.log, - "Finalized merge transition block is invalid!"; - "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ - You may be on a hostile network.", - "block_hash" => ?block.canonical_root() - ); - let mut shutdown_sender = chain.shutdown_sender(); - if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, - )) { - crit!( - chain.log, - "Failed to shut down client"; - "error" => ?e, - "shutdown_reason" => INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON - ); - } - } - _ => {} - } - } - Ok(None) => return Err(Error::NoBlockFound(otb)), - // Our database has pruned the payload and the payload was unavailable on the EL since - // the EL is still syncing or the payload is non-canonical. - Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), - Err(e) => return Err(Error::BeaconChain(e)), - } - } - - // attempt to validate any non-finalized canonical otb blocks - for otb in unfinalized_canonical_otbs { - match chain.get_block(otb.root()).await { - Ok(Some(block)) => { - match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await - { - Ok(()) => { - // merge transition block is valid, remove it from OTB - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - info!( - chain.log, - "Validated merge transition block"; - "block_root" => ?otb.root(), - "type" => "not finalized" - ); - } - // The block was not able to be verified by the EL. Leave the OTB in the - // database since the EL is likely still syncing and may verify the block - // later. 
- Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::UnverifiedNonOptimisticCandidate, - )) => (), - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, - )) => { - // Unfinalized Merge Transition Block is Invalid -> Run process_invalid_execution_payload - warn!( - chain.log, - "Merge transition block invalid"; - "block_root" => ?otb.root() - ); - chain - .process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: *otb.root(), - }, - ) - .await - .map_err(|e| { - warn!( - chain.log, - "Error checking merge transition block"; - "error" => ?e, - "location" => "process_invalid_execution_payload" - ); - Error::BeaconChain(e) - })?; - } - _ => {} - } - } - Ok(None) => return Err(Error::NoBlockFound(otb)), - // Our database has pruned the payload and the payload was unavailable on the EL since - // the EL is still syncing or the payload is non-canonical. - Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), - Err(e) => return Err(Error::BeaconChain(e)), - } - } - - Ok(()) -} - -/// Loop until any optimistically imported merge transition blocks have been verified and -/// the merge has been finalized. -async fn otb_verification_service(chain: Arc>) { - let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; - loop { - match chain - .slot_clock - .duration_to_next_epoch(T::EthSpec::slots_per_epoch()) - { - Some(duration) => { - let additional_delay = epoch_duration / EPOCH_DELAY_FACTOR; - sleep(duration + additional_delay).await; - - debug!( - chain.log, - "OTB verification service firing"; - ); - - if !is_merge_transition_complete( - &chain.canonical_head.cached_head().snapshot.beacon_state, - ) { - // We are pre-merge. Nothing to do yet. - continue; - } - - // load all optimistically imported transition blocks from the database - match load_optimistic_transition_blocks(chain.as_ref()) { - Ok(otbs) => { - if otbs.is_empty() { - if chain - .canonical_head - .fork_choice_read_lock() - .get_finalized_block() - .map_or(false, |block| { - block.execution_status.is_execution_enabled() - }) - { - // there are no optimistic blocks in the database, we can exit - // the service since the merge transition is finalized and we'll - // never see another transition block - break; - } else { - debug!( - chain.log, - "No optimistic transition blocks"; - "info" => "waiting for the merge transition to finalize" - ) - } - } - if let Err(e) = validate_optimistic_transition_blocks(&chain, otbs).await { - warn!( - chain.log, - "Error while validating optimistic transition blocks"; - "error" => ?e - ); - } - } - Err(e) => { - error!( - chain.log, - "Error loading optimistic transition blocks"; - "error" => ?e - ); - } - }; - } - None => { - error!(chain.log, "Failed to read slot clock"); - // If we can't read the slot clock, just wait another slot. 
- sleep(chain.slot_clock.slot_duration()).await; - } - }; - } - debug!( - chain.log, - "No optimistic transition blocks in database"; - "msg" => "shutting down OTB verification service" - ); -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs index f532c0e6728..c34512ededb 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs @@ -152,7 +152,7 @@ pub fn delete_old_schema_freezer_data( db.cold_db.do_atomically(cold_ops)?; // In order to reclaim space, we need to compact the freezer DB as well. - db.cold_db.compact()?; + db.compact_freezer()?; Ok(()) } diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index a662cc49c9d..da1d60db17d 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -512,7 +512,7 @@ mod test { } assert!( - !cache.contains(&shuffling_id_and_committee_caches.get(0).unwrap().0), + !cache.contains(&shuffling_id_and_committee_caches.first().unwrap().0), "should not contain oldest epoch shuffling id" ); assert_eq!( diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 0b121356b9d..87fefe71146 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -70,12 +70,12 @@ async fn produces_attestations_from_attestation_simulator_service() { } // Compare the prometheus metrics that evaluates the performance of the unaggregated attestations - let hit_prometheus_metrics = vec![ + let hit_prometheus_metrics = [ metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL, ]; - let miss_prometheus_metrics = vec![ + let miss_prometheus_metrics = [ metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL, diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index e168cbb6f4d..dcc63ddf620 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -431,10 +431,12 @@ impl GossipTester { .chain .verify_aggregated_attestation_for_gossip(&aggregate) .err() - .expect(&format!( - "{} should error during verify_aggregated_attestation_for_gossip", - desc - )); + .unwrap_or_else(|| { + panic!( + "{} should error during verify_aggregated_attestation_for_gossip", + desc + ) + }); inspect_err(&self, err); /* @@ -449,10 +451,12 @@ impl GossipTester { .unwrap(); assert_eq!(results.len(), 2); - let batch_err = results.pop().unwrap().err().expect(&format!( - "{} should error during batch_verify_aggregated_attestations_for_gossip", - desc - )); + let batch_err = results.pop().unwrap().err().unwrap_or_else(|| { + panic!( + "{} should error during batch_verify_aggregated_attestations_for_gossip", + desc + ) + }); inspect_err(&self, batch_err); self @@ -475,10 +479,12 @@ impl GossipTester { .chain .verify_unaggregated_attestation_for_gossip(&attn, 
Some(subnet_id)) .err() - .expect(&format!( - "{} should error during verify_unaggregated_attestation_for_gossip", - desc - )); + .unwrap_or_else(|| { + panic!( + "{} should error during verify_unaggregated_attestation_for_gossip", + desc + ) + }); inspect_err(&self, err); /* @@ -496,10 +502,12 @@ impl GossipTester { ) .unwrap(); assert_eq!(results.len(), 2); - let batch_err = results.pop().unwrap().err().expect(&format!( - "{} should error during batch_verify_unaggregated_attestations_for_gossip", - desc - )); + let batch_err = results.pop().unwrap().err().unwrap_or_else(|| { + panic!( + "{} should error during batch_verify_unaggregated_attestations_for_gossip", + desc + ) + }); inspect_err(&self, batch_err); self @@ -816,7 +824,7 @@ async fn aggregated_gossip_verification() { let (index, sk) = tester.non_aggregator(); *a = SignedAggregateAndProof::from_aggregate( index as u64, - tester.valid_aggregate.message().aggregate().clone(), + tester.valid_aggregate.message().aggregate(), None, &sk, &chain.canonical_head.cached_head().head_fork(), diff --git a/beacon_node/beacon_chain/tests/bellatrix.rs b/beacon_node/beacon_chain/tests/bellatrix.rs index 5bd3452623a..5080b0890bd 100644 --- a/beacon_node/beacon_chain/tests/bellatrix.rs +++ b/beacon_node/beacon_chain/tests/bellatrix.rs @@ -82,7 +82,7 @@ async fn merge_with_terminal_block_hash_override() { let block = &harness.chain.head_snapshot().beacon_block; - let execution_payload = block.message().body().execution_payload().unwrap().clone(); + let execution_payload = block.message().body().execution_payload().unwrap(); if i == 0 { assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash); } @@ -133,7 +133,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { * Do the Bellatrix fork, without a terminal PoW block. */ - harness.extend_to_slot(bellatrix_fork_slot).await; + Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await; let bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!(bellatrix_head.as_bellatrix().is_ok()); @@ -207,15 +207,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; - execution_payloads.push( - block - .message() - .body() - .execution_payload() - .unwrap() - .clone() - .into(), - ); + execution_payloads.push(block.message().body().execution_payload().unwrap().into()); } verify_execution_payload_chain(execution_payloads.as_slice()); diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index ac97a95721d..3ce5702f2ea 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -54,7 +54,7 @@ async fn base_altair_bellatrix_capella() { /* * Do the Altair fork. */ - harness.extend_to_slot(altair_fork_slot).await; + Box::pin(harness.extend_to_slot(altair_fork_slot)).await; let altair_head = &harness.chain.head_snapshot().beacon_block; assert!(altair_head.as_altair().is_ok()); @@ -63,7 +63,7 @@ async fn base_altair_bellatrix_capella() { /* * Do the Bellatrix fork, without a terminal PoW block. */ - harness.extend_to_slot(bellatrix_fork_slot).await; + Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await; let bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!(bellatrix_head.as_bellatrix().is_ok()); @@ -81,7 +81,7 @@ async fn base_altair_bellatrix_capella() { /* * Next Bellatrix block shouldn't include an exec payload. 
*/ - harness.extend_slots(1).await; + Box::pin(harness.extend_slots(1)).await; let one_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( @@ -112,7 +112,7 @@ async fn base_altair_bellatrix_capella() { terminal_block.timestamp = timestamp; } }); - harness.extend_slots(1).await; + Box::pin(harness.extend_slots(1)).await; let two_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 1325875a275..01b790bb25b 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -413,7 +413,7 @@ async fn invalid_payload_invalidates_parent() { rig.import_block(Payload::Valid).await; // Import a valid transition block. rig.move_to_first_justification(Payload::Syncing).await; - let roots = vec![ + let roots = [ rig.import_block(Payload::Syncing).await, rig.import_block(Payload::Syncing).await, rig.import_block(Payload::Syncing).await, @@ -986,10 +986,13 @@ async fn payload_preparation() { // Provide preparation data to the EL for `proposer`. el.update_proposer_preparation( Epoch::new(1), - &[ProposerPreparationData { - validator_index: proposer as u64, - fee_recipient, - }], + [( + &ProposerPreparationData { + validator_index: proposer as u64, + fee_recipient, + }, + &None, + )], ) .await; @@ -1049,7 +1052,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for gossip. assert!(matches!( - rig.harness.chain.clone().verify_block_for_gossip(block.clone().into()).await, + rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -1119,10 +1122,13 @@ async fn payload_preparation_before_transition_block() { // Provide preparation data to the EL for `proposer`. 
el.update_proposer_preparation( Epoch::new(0), - &[ProposerPreparationData { - validator_index: proposer as u64, - fee_recipient, - }], + [( + &ProposerPreparationData { + validator_index: proposer as u64, + fee_recipient, + }, + &None, + )], ) .await; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 522020e476d..e1258ccdea7 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -330,7 +330,7 @@ async fn long_skip() { final_blocks as usize, BlockStrategy::ForkCanonicalChainAt { previous_slot: Slot::new(initial_blocks), - first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), + first_slot: Slot::new(initial_blocks + skip_slots + 1), }, AttestationStrategy::AllValidators, ) @@ -381,8 +381,7 @@ async fn randao_genesis_storage() { .beacon_state .randao_mixes() .iter() - .find(|x| **x == genesis_value) - .is_some()); + .any(|x| *x == genesis_value)); // Then upon adding one more block, it isn't harness.advance_slot(); @@ -393,14 +392,13 @@ async fn randao_genesis_storage() { AttestationStrategy::AllValidators, ) .await; - assert!(harness + assert!(!harness .chain .head_snapshot() .beacon_state .randao_mixes() .iter() - .find(|x| **x == genesis_value) - .is_none()); + .any(|x| *x == genesis_value)); check_finalization(&harness, num_slots); check_split_slot(&harness, store); @@ -1062,7 +1060,7 @@ fn check_shuffling_compatible( let current_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( &block_root, head_state.current_epoch(), - &head_state, + head_state, ); // Check for consistency with the more expensive shuffling lookup. @@ -1102,7 +1100,7 @@ fn check_shuffling_compatible( let previous_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( &block_root, head_state.previous_epoch(), - &head_state, + head_state, ); harness .chain @@ -1130,14 +1128,11 @@ fn check_shuffling_compatible( // Targeting two epochs before the current epoch should always return false if head_state.current_epoch() >= 2 { - assert_eq!( - harness.chain.shuffling_is_compatible( - &block_root, - head_state.current_epoch() - 2, - &head_state - ), - false - ); + assert!(!harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch() - 2, + head_state + )); } } } @@ -1559,14 +1554,13 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks, _, _, _) = rig - .add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ) - .await; + let (canonical_blocks, _, _, _) = Box::pin(rig.add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + )) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1939,7 +1933,7 @@ async fn prune_single_block_long_skip() { 2 * slots_per_epoch, 1, 2 * slots_per_epoch, - 2 * slots_per_epoch as u64, + 2 * slots_per_epoch, 1, ) .await; @@ -1961,31 +1955,45 @@ async fn prune_shared_skip_states_mid_epoch() { #[tokio::test] async fn prune_shared_skip_states_epoch_boundaries() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch).await; - pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch).await; - pruning_test( + Box::pin(pruning_test( + 
slots_per_epoch - 1, + 1, + slots_per_epoch, + 2, + slots_per_epoch, + )) + .await; + Box::pin(pruning_test( + slots_per_epoch - 1, + 2, + slots_per_epoch, + 1, + slots_per_epoch, + )) + .await; + Box::pin(pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, - slots_per_epoch as u64 / 2, + slots_per_epoch / 2, slots_per_epoch, - slots_per_epoch as u64 / 2 + 1, + slots_per_epoch / 2 + 1, slots_per_epoch, - ) + )) .await; - pruning_test( + Box::pin(pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, - slots_per_epoch as u64 / 2, + slots_per_epoch / 2, slots_per_epoch, - slots_per_epoch as u64 / 2 + 1, + slots_per_epoch / 2 + 1, slots_per_epoch, - ) + )) .await; - pruning_test( + Box::pin(pruning_test( 2 * slots_per_epoch - 1, - slots_per_epoch as u64, + slots_per_epoch, 1, 0, 2 * slots_per_epoch, - ) + )) .await; } @@ -2094,7 +2102,7 @@ async fn pruning_test( ); check_chain_dump( &harness, - (num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1) as u64, + num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1, ); let all_canonical_states = harness @@ -2613,8 +2621,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { harness.advance_slot(); } harness.extend_to_slot(finalizing_slot - 1).await; - harness - .add_block_at_slot(finalizing_slot, harness.get_current_state()) + Box::pin(harness.add_block_at_slot(finalizing_slot, harness.get_current_state())) .await .unwrap(); @@ -2789,6 +2796,7 @@ async fn finalizes_after_resuming_from_db() { ); } +#[allow(clippy::large_stack_frames)] #[tokio::test] async fn revert_minority_fork_on_resume() { let validator_count = 16; diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index d1b3139d42c..6d30b8a4e32 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -73,7 +73,7 @@ fn get_valid_sync_committee_message_for_block( let head_state = harness.chain.head_beacon_state_cloned(); let (signature, _) = harness .make_sync_committee_messages(&head_state, block_root, slot, relative_sync_committee) - .get(0) + .first() .expect("sync messages should exist") .get(message_index) .expect("first sync message should exist") @@ -104,7 +104,7 @@ fn get_valid_sync_contribution( ); let (_, contribution_opt) = sync_contributions - .get(0) + .first() .expect("sync contributions should exist"); let contribution = contribution_opt .as_ref() diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 7ae34ccf387..c641f32b820 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -170,7 +170,7 @@ async fn find_reorgs() { harness .extend_chain( - num_blocks_produced as usize, + num_blocks_produced, BlockStrategy::OnCanonicalHead, // No need to produce attestations for this test. 
AttestationStrategy::SomeValidators(vec![]), @@ -203,7 +203,7 @@ async fn find_reorgs() { assert_eq!( find_reorg_slot( &harness.chain, - &head_state, + head_state, harness.chain.head_beacon_block().canonical_root() ), head_slot @@ -503,7 +503,6 @@ async fn unaggregated_attestations_added_to_fork_choice_some_none() { .unwrap(); let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) - .into_iter() .map(|validator_index| { let slot = state .get_attestation_duties(validator_index, RelativeEpoch::Current) diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 9273137bf6d..c96e0868d73 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -4,22 +4,22 @@ version = "0.1.0" edition = { workspace = true } [dependencies] -slog = { workspace = true } -itertools = { workspace = true } -logging = { workspace = true } -tokio = { workspace = true } -tokio-util = { workspace = true } -futures = { workspace = true } fnv = { workspace = true } -strum = { workspace = true } -task_executor = { workspace = true } -slot_clock = { workspace = true } +futures = { workspace = true } +itertools = { workspace = true } lighthouse_network = { workspace = true } -types = { workspace = true } +logging = { workspace = true } metrics = { workspace = true } -parking_lot = { workspace = true } num_cpus = { workspace = true } +parking_lot = { workspace = true } serde = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +strum = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +tokio-util = { workspace = true } +types = { workspace = true } [dev-dependencies] tokio = { workspace = true, features = ["test-util"] } diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index c3658f45c73..3531e81c847 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -5,8 +5,8 @@ edition = { workspace = true } authors = ["Sean Anderson "] [dependencies] +eth2 = { workspace = true } +lighthouse_version = { workspace = true } reqwest = { workspace = true } sensitive_url = { workspace = true } -eth2 = { workspace = true } serde = { workspace = true } -lighthouse_version = { workspace = true } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 21a6e42cc50..614115eb588 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -5,42 +5,41 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dev-dependencies] +operation_pool = { workspace = true } serde_yaml = { workspace = true } state_processing = { workspace = true } -operation_pool = { workspace = true } tokio = { workspace = true } [dependencies] beacon_chain = { workspace = true } -store = { workspace = true } -network = { workspace = true } -timer = { path = "../timer" } -lighthouse_network = { workspace = true } -types = { workspace = true } -eth2_config = { workspace = true } -slot_clock = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -error-chain = { workspace = true } -slog = { workspace = true } -tokio = { workspace = true } -futures = { workspace = true } +beacon_processor = { workspace = true } +directory = { workspace = true } dirs = { workspace = true } +environment = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } -kzg = { workspace = true } -sensitive_url = { workspace = true } +eth2_config = { 
workspace = true } +ethereum_ssz = { workspace = true } +execution_layer = { workspace = true } +futures = { workspace = true } genesis = { workspace = true } -task_executor = { workspace = true } -environment = { workspace = true } -metrics = { workspace = true } -time = "0.3.5" -directory = { workspace = true } http_api = { workspace = true } http_metrics = { path = "../http_metrics" } +kzg = { workspace = true } +lighthouse_network = { workspace = true } +metrics = { workspace = true } +monitoring_api = { workspace = true } +network = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } slasher = { workspace = true } slasher_service = { path = "../../slasher/service" } -monitoring_api = { workspace = true } -execution_layer = { workspace = true } -beacon_processor = { workspace = true } -ethereum_ssz = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +store = { workspace = true } +task_executor = { workspace = true } +time = "0.3.5" +timer = { path = "../timer" } +tokio = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 961f5140f92..7c6a253aca4 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -7,7 +7,6 @@ use crate::Client; use beacon_chain::attestation_simulator::start_attestation_simulator_service; use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_service; -use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; use beacon_chain::{ @@ -970,7 +969,6 @@ where } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); - start_otb_verification_service(runtime_context.executor.clone(), beacon_chain.clone()); start_availability_cache_maintenance_service( runtime_context.executor.clone(), beacon_chain.clone(), diff --git a/beacon_node/client/src/error.rs b/beacon_node/client/src/error.rs deleted file mode 100644 index 20cf6f9877b..00000000000 --- a/beacon_node/client/src/error.rs +++ /dev/null @@ -1,7 +0,0 @@ -use error_chain::error_chain; - -error_chain! 
{ - links { - Network(network::error::Error, network::error::ErrorKind); - } -} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index e6042103e16..0b6550c208d 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -4,7 +4,6 @@ mod metrics; mod notifier; pub mod builder; -pub mod error; use beacon_chain::BeaconChain; use lighthouse_network::{Enr, Multiaddr, NetworkGlobals}; diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 50400a77e06..8ccd50aad8d 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -5,27 +5,27 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dev-dependencies] +environment = { workspace = true } eth1_test_rig = { workspace = true } serde_yaml = { workspace = true } sloggers = { workspace = true } -environment = { workspace = true } [dependencies] +eth2 = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } execution_layer = { workspace = true } futures = { workspace = true } -serde = { workspace = true } -types = { workspace = true } +logging = { workspace = true } merkle_proof = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } slog = { workspace = true } -logging = { workspace = true } -superstruct = { workspace = true } -tokio = { workspace = true } state_processing = { workspace = true } -metrics = { workspace = true } +superstruct = { workspace = true } task_executor = { workspace = true } -eth2 = { workspace = true } -sensitive_url = { workspace = true } +tokio = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 0ef101fae7c..7eb7b4a15e1 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -2,54 +2,53 @@ name = "execution_layer" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +alloy-consensus = { workspace = true } alloy-primitives = { workspace = true } -types = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } -logging = { workspace = true } -sensitive_url = { workspace = true } -reqwest = { workspace = true } -ethereum_serde_utils = { workspace = true } -serde_json = { workspace = true } -serde = { workspace = true } -warp = { workspace = true } -jsonwebtoken = "9" +alloy-rlp = { workspace = true } +arc-swap = "1.6.0" +builder_client = { path = "../builder_client" } bytes = { workspace = true } -task_executor = { workspace = true } -hex = { workspace = true } -ethereum_ssz = { workspace = true } -ssz_types = { workspace = true } eth2 = { workspace = true } +eth2_network_config = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } +ethers-core = { workspace = true } +fixed_bytes = { workspace = true } +fork_choice = { workspace = true } +hash-db = "0.15.2" +hash256-std-hasher = "0.15.2" +hex = { workspace = true } +jsonwebtoken = "9" +keccak-hash = "0.10.0" kzg = { workspace = true } -state_processing = { workspace = true } -superstruct = { workspace = true } +lighthouse_version = { workspace = true } +logging = { 
workspace = true } lru = { workspace = true } -tree_hash = { workspace = true } -tree_hash_derive = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } +pretty_reqwest_error = { workspace = true } +rand = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +sha2 = { workspace = true } +slog = { workspace = true } slot_clock = { workspace = true } +ssz_types = { workspace = true } +state_processing = { workspace = true } +strum = { workspace = true } +superstruct = { workspace = true } +task_executor = { workspace = true } tempfile = { workspace = true } -rand = { workspace = true } -zeroize = { workspace = true } -metrics = { workspace = true } -ethers-core = { workspace = true } -builder_client = { path = "../builder_client" } -fork_choice = { workspace = true } +tokio = { workspace = true } tokio-stream = { workspace = true } -strum = { workspace = true } -keccak-hash = "0.10.0" -hash256-std-hasher = "0.15.2" +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } triehash = "0.8.4" -hash-db = "0.15.2" -pretty_reqwest_error = { workspace = true } -arc-swap = "1.6.0" -eth2_network_config = { workspace = true } -alloy-rlp = { workspace = true } -alloy-consensus = { workspace = true } -lighthouse_version = { workspace = true } -fixed_bytes = { workspace = true } -sha2 = { workspace = true } +types = { workspace = true } +warp = { workspace = true } +zeroize = { workspace = true } diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index cdc172cff47..d3a32c7929b 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -7,7 +7,7 @@ use keccak_hash::KECCAK_EMPTY_LIST_RLP; use triehash::ordered_trie_root; use types::{ EncodableExecutionBlockHeader, EthSpec, ExecutionBlockHash, ExecutionBlockHeader, - ExecutionPayloadRef, Hash256, + ExecutionPayloadRef, ExecutionRequests, Hash256, }; /// Calculate the block hash of an execution block. @@ -17,6 +17,7 @@ use types::{ pub fn calculate_execution_block_hash( payload: ExecutionPayloadRef, parent_beacon_block_root: Option, + execution_requests: Option<&ExecutionRequests>, ) -> (ExecutionBlockHash, Hash256) { // Calculate the transactions root. // We're currently using a deprecated Parity library for this. We should move to a @@ -38,6 +39,7 @@ pub fn calculate_execution_block_hash( let rlp_blob_gas_used = payload.blob_gas_used().ok(); let rlp_excess_blob_gas = payload.excess_blob_gas().ok(); + let requests_root = execution_requests.map(|requests| requests.requests_hash()); // Construct the block header. let exec_block_header = ExecutionBlockHeader::from_payload( @@ -48,6 +50,7 @@ pub fn calculate_execution_block_hash( rlp_blob_gas_used, rlp_excess_blob_gas, parent_beacon_block_root, + requests_root, ); // Hash the RLP encoding of the block header. 
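A note on the new `requests_root` plumbed through `calculate_execution_block_hash` here: from Electra onwards the execution block header commits to the block's execution-layer requests, and the diff takes that commitment from `ExecutionRequests::requests_hash()`. The sketch below is a minimal, self-contained illustration of the EIP-7685 flat-hash scheme that method is assumed to implement, not the Lighthouse code: sha256 over the concatenation of `sha256(request_type ++ request_data)` for each non-empty request list, in ascending type order.

```rust
use sha2::{Digest, Sha256};

/// Illustrative sketch (assumption: EIP-7685 flat hash) of the commitment
/// carried by `requests_root`. Each entry is (request_type, ssz_bytes).
fn requests_hash_sketch(requests: &[(u8, Vec<u8>)]) -> [u8; 32] {
    let mut outer = Sha256::new();
    for (request_type, data) in requests {
        // Empty request lists contribute nothing to the commitment.
        if data.is_empty() {
            continue;
        }
        let mut inner = Sha256::new();
        inner.update([*request_type]);
        inner.update(data);
        outer.update(inner.finalize());
    }
    outer.finalize().into()
}

fn main() {
    // With no non-empty requests the commitment is sha256 of the empty string.
    let empty = requests_hash_sketch(&[]);
    assert_eq!(empty[..4], [0xe3, 0xb0, 0xc4, 0x42]); // leading bytes of sha256("")
}
```

Because `requests_root` now feeds the RLP-encoded header that gets hashed, a mismatching commitment from the EL surfaces through the existing `BlockHashMismatch` path rather than needing a separate check.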
@@ -118,6 +121,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b"; let expected_hash = @@ -149,6 +153,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_rlp = "f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b"; let expected_hash = @@ -181,6 +186,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_hash = Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351") @@ -211,6 +217,7 @@ mod test { blob_gas_used: Some(0x0u64), excess_blob_gas: Some(0x0u64), parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()), + requests_root: None, }; let expected_hash = Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408") @@ -221,29 +228,30 @@ mod test { #[test] fn test_rlp_encode_block_electra() { let header = ExecutionBlockHeader { - parent_hash: Hash256::from_str("172864416698b842f4c92f7b476be294b4ef720202779df194cd225f531053ab").unwrap(), + parent_hash: Hash256::from_str("a628f146df398a339768bd101f7dc41d828be79aca5dd02cc878a51bdbadd761").unwrap(), ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(), - beneficiary: Address::from_str("878705ba3f8bc32fcf7f4caa1a35e72af65cf766").unwrap(), - state_root: 
Hash256::from_str("c6457d0df85c84c62d1c68f68138b6e796e8a44fb44de221386fb2d5611c41e0").unwrap(), - transactions_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - receipts_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - logs_bloom:<[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), + beneficiary: Address::from_str("f97e180c050e5ab072211ad2c213eb5aee4df134").unwrap(), + state_root: Hash256::from_str("fdff009f8280bd113ebb4df8ce4e2dcc9322d43184a0b506e70b7f4823ca1253").unwrap(), + transactions_root: Hash256::from_str("452806578b4fa881cafb019c47e767e37e2249accf859159f00cddefb2579bb5").unwrap(), + receipts_root: Hash256::from_str("72ceac0f16a32041c881b3220d39ca506a286bef163c01a4d0821cd4027d31c7").unwrap(), + logs_bloom:<[u8; 256]>::from_hex("10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000").unwrap().into(), difficulty: Uint256::ZERO, - number: Uint256::from(97), - gas_limit: Uint256::from(27482534), - gas_used: Uint256::ZERO, - timestamp: 1692132829u64, - extra_data: hex::decode("d883010d00846765746888676f312e32302e37856c696e7578").unwrap(), - mix_hash: Hash256::from_str("0b493c22d2ad4ca76c77ae6ad916af429b42b1dc98fdcb8e5ddbd049bbc5d623").unwrap(), + number: Uint256::from(8230), + gas_limit: Uint256::from(30000000), + gas_used: Uint256::from(3716848), + timestamp: 1730921268, + extra_data: hex::decode("d883010e0c846765746888676f312e32332e32856c696e7578").unwrap(), + mix_hash: Hash256::from_str("e87ca9a45b2e61bbe9080d897db1d584b5d2367d22e898af901091883b7b96ec").unwrap(), nonce: Hash64::ZERO, - base_fee_per_gas: Uint256::from(2374u64), + base_fee_per_gas: Uint256::from(7u64), withdrawals_root: Some(Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap()), - blob_gas_used: Some(0x0u64), - excess_blob_gas: Some(0x0u64), - parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()), + blob_gas_used: Some(786432), + excess_blob_gas: Some(44695552), + parent_beacon_block_root: Some(Hash256::from_str("f3a888fee010ebb1ae083547004e96c254b240437823326fdff8354b1fc25629").unwrap()), + requests_root: Some(Hash256::from_str("9440d3365f07573919e1e9ac5178c20ec6fe267357ee4baf8b6409901f331b62").unwrap()), }; let expected_hash = - Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408") + Hash256::from_str("61e67afc96bf21be6aab52c1ace1db48de7b83f03119b0644deb4b69e87e09e1") .unwrap(); test_rlp_encoding(&header, None, 
expected_hash); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index d4734be448d..33dc60d0378 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -812,7 +812,7 @@ impl HttpJsonRpc { new_payload_request_electra.versioned_hashes, new_payload_request_electra.parent_beacon_block_root, new_payload_request_electra - .execution_requests_list + .execution_requests .get_execution_requests_list(), ]); diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index efd68f1023d..1c6639804e3 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -6,7 +6,9 @@ use strum::EnumString; use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobsList; -use types::execution_requests::{ConsolidationRequests, DepositRequests, WithdrawalRequests}; +use types::execution_requests::{ + ConsolidationRequests, DepositRequests, RequestPrefix, WithdrawalRequests, +}; use types::{Blob, FixedVector, KzgProof, Unsigned}; #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -339,25 +341,6 @@ impl From> for ExecutionPayload { } } -/// This is used to index into the `execution_requests` array. -#[derive(Debug, Copy, Clone)] -enum RequestPrefix { - Deposit, - Withdrawal, - Consolidation, -} - -impl RequestPrefix { - pub fn from_prefix(prefix: u8) -> Option { - match prefix { - 0 => Some(Self::Deposit), - 1 => Some(Self::Withdrawal), - 2 => Some(Self::Consolidation), - _ => None, - } - } -} - /// Format of `ExecutionRequests` received over the engine api. /// /// Array of ssz-encoded requests list encoded as hex bytes. 
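The hunk that follows also makes the engine-API request decoding tolerant of an optional `0x` prefix on each hex-encoded request list, since execution clients differ on whether they include it. A standalone sketch of that normalization (the helper name here is hypothetical, not part of the codebase):

```rust
/// Decode a possibly `0x`-prefixed hex string into bytes.
fn decode_request_hex(request: &str) -> Result<Vec<u8>, String> {
    let stripped = request.strip_prefix("0x").unwrap_or(request);
    hex::decode(stripped).map_err(|e| format!("Invalid hex {:?}", e))
}

fn main() {
    // Both encodings now decode to the same bytes.
    assert_eq!(decode_request_hex("0x0102").unwrap(), vec![1, 2]);
    assert_eq!(decode_request_hex("0102").unwrap(), vec![1, 2]);
}
```

Without the strip, `hex::decode` rejects the `x` and the whole `ExecutionRequests` conversion fails for clients that send prefixed strings.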
@@ -379,7 +362,8 @@ impl TryFrom for ExecutionRequests { for (i, request) in value.0.into_iter().enumerate() { // hex string - let decoded_bytes = hex::decode(request).map_err(|e| format!("Invalid hex {:?}", e))?; + let decoded_bytes = hex::decode(request.strip_prefix("0x").unwrap_or(&request)) + .map_err(|e| format!("Invalid hex {:?}", e))?; match RequestPrefix::from_prefix(i as u8) { Some(RequestPrefix::Deposit) => { requests.deposits = DepositRequests::::from_ssz_bytes(&decoded_bytes) @@ -431,7 +415,7 @@ pub struct JsonGetPayloadResponse { #[superstruct(only(V3, V4))] pub should_override_builder: bool, #[superstruct(only(V4))] - pub requests: JsonExecutionRequests, + pub execution_requests: JsonExecutionRequests, } impl TryFrom> for GetPayloadResponse { @@ -464,7 +448,7 @@ impl TryFrom> for GetPayloadResponse { block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - requests: response.requests.try_into()?, + requests: response.execution_requests.try_into()?, })) } } diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index 318779b7f3e..60bc8489744 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -44,7 +44,7 @@ pub struct NewPayloadRequest<'block, E: EthSpec> { #[superstruct(only(Deneb, Electra))] pub parent_beacon_block_root: Hash256, #[superstruct(only(Electra))] - pub execution_requests_list: &'block ExecutionRequests, + pub execution_requests: &'block ExecutionRequests, } impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { @@ -121,8 +121,11 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); - let (header_hash, rlp_transactions_root) = - calculate_execution_block_hash(payload, parent_beacon_block_root); + let (header_hash, rlp_transactions_root) = calculate_execution_block_hash( + payload, + parent_beacon_block_root, + self.execution_requests().ok().copied(), + ); if header_hash != self.block_hash() { return Err(Error::BlockHashMismatch { @@ -185,7 +188,7 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> .map(kzg_commitment_to_versioned_hash) .collect(), parent_beacon_block_root: block_ref.parent_root, - execution_requests_list: &block_ref.body.execution_requests, + execution_requests: &block_ref.body.execution_requests, })), } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 08a00d7bf8d..ae0dca9833f 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -28,7 +28,7 @@ use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; -use std::collections::HashMap; +use std::collections::{hash_map::Entry, HashMap}; use std::fmt; use std::future::Future; use std::io::Write; @@ -319,10 +319,52 @@ impl> BlockProposalContents { + pub parent_hash: ExecutionBlockHash, + pub parent_gas_limit: u64, + pub proposer_gas_limit: Option, + pub payload_attributes: &'a PayloadAttributes, + pub forkchoice_update_params: &'a ForkchoiceUpdateParameters, + pub current_fork: ForkName, +} + #[derive(Clone, PartialEq)] pub struct ProposerPreparationDataEntry { update_epoch: Epoch, preparation_data: ProposerPreparationData, + gas_limit: Option, +} + 
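The `update` method defined just below merges a re-registration into an existing `ProposerPreparationDataEntry`. The subtlety worth calling out is that a `None` gas limit never clears a previously registered limit; only a differing `Some` overwrites it, while epoch and preparation data always track the latest values. A self-contained sketch of those merge semantics, using simplified stand-in types rather than the real structs:

```rust
struct PrepEntry {
    update_epoch: u64,
    gas_limit: Option<u64>,
}

impl PrepEntry {
    /// Returns true if anything changed, mirroring the merge rules below:
    /// only a `Some` gas limit can overwrite, and only when it differs.
    fn merge(&mut self, updated: PrepEntry) -> bool {
        let mut changed = false;
        if let Some(gas) = updated.gas_limit {
            if self.gas_limit != Some(gas) {
                self.gas_limit = Some(gas);
                changed = true;
            }
        }
        if self.update_epoch != updated.update_epoch {
            self.update_epoch = updated.update_epoch;
            changed = true;
        }
        changed
    }
}

fn main() {
    let mut entry = PrepEntry { update_epoch: 1, gas_limit: Some(36_000_000) };
    // A later registration without a gas limit keeps the stored one.
    assert!(entry.merge(PrepEntry { update_epoch: 2, gas_limit: None }));
    assert_eq!(entry.gas_limit, Some(36_000_000));
}
```

Keeping the stored limit on `None` means a validator client that stops sending a gas limit preference does not silently revert the proposer to the default.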
+impl ProposerPreparationDataEntry { + pub fn update(&mut self, updated: Self) -> bool { + let mut changed = false; + // Update `gas_limit` if `updated.gas_limit` is `Some` and: + // - `self.gas_limit` is `None`, or + // - both are `Some` but the values differ. + if let Some(updated_gas_limit) = updated.gas_limit { + if self.gas_limit != Some(updated_gas_limit) { + self.gas_limit = Some(updated_gas_limit); + changed = true; + } + } + + // Update `update_epoch` if it differs + if self.update_epoch != updated.update_epoch { + self.update_epoch = updated.update_epoch; + changed = true; + } + + // Update `preparation_data` if it differs + if self.preparation_data != updated.preparation_data { + self.preparation_data = updated.preparation_data; + changed = true; + } + + changed + } } #[derive(Hash, PartialEq, Eq)] @@ -711,23 +753,29 @@ impl ExecutionLayer { } /// Updates the proposer preparation data provided by validators - pub async fn update_proposer_preparation( - &self, - update_epoch: Epoch, - preparation_data: &[ProposerPreparationData], - ) { + pub async fn update_proposer_preparation<'a, I>(&self, update_epoch: Epoch, proposer_data: I) + where + I: IntoIterator)>, + { let mut proposer_preparation_data = self.proposer_preparation_data().await; - for preparation_entry in preparation_data { + + for (preparation_entry, gas_limit) in proposer_data { let new = ProposerPreparationDataEntry { update_epoch, preparation_data: preparation_entry.clone(), + gas_limit: *gas_limit, }; - let existing = - proposer_preparation_data.insert(preparation_entry.validator_index, new.clone()); - - if existing != Some(new) { - metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + match proposer_preparation_data.entry(preparation_entry.validator_index) { + Entry::Occupied(mut entry) => { + if entry.get_mut().update(new) { + metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + } + } + Entry::Vacant(entry) => { + entry.insert(new); + metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + } } } } @@ -809,6 +857,13 @@ impl ExecutionLayer { } } + pub async fn get_proposer_gas_limit(&self, proposer_index: u64) -> Option { + self.proposer_preparation_data() + .await + .get(&proposer_index) + .and_then(|entry| entry.gas_limit) + } + /// Maps to the `engine_getPayload` JSON-RPC call. /// /// However, it will attempt to call `self.prepare_payload` if it cannot find an existing @@ -818,14 +873,10 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. 
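The switch above from a blind `insert`-then-compare to the `Entry` API avoids cloning each new entry just to diff it afterwards, and only bumps `EXECUTION_LAYER_PROPOSER_DATA_UPDATED` when an occupied entry actually changes. A reduced sketch of the pattern, with plain `u64` values standing in for the real entry type:

```rust
use std::collections::{hash_map::Entry, HashMap};

/// Upsert that reports whether the map actually changed.
fn upsert(map: &mut HashMap<u64, u64>, key: u64, value: u64) -> bool {
    match map.entry(key) {
        Entry::Occupied(mut occupied) => {
            if *occupied.get() == value {
                false
            } else {
                occupied.insert(value);
                true
            }
        }
        Entry::Vacant(vacant) => {
            vacant.insert(value);
            true
        }
    }
}

fn main() {
    let mut map = HashMap::new();
    assert!(upsert(&mut map, 7, 30_000_000)); // vacant: counts as an update
    assert!(!upsert(&mut map, 7, 30_000_000)); // unchanged: no metric bump
    assert!(upsert(&mut map, 7, 36_000_000)); // changed: counts again
}
```

In the real code the occupied branch delegates to `ProposerPreparationDataEntry::update`, so the "did it change" decision follows the merge rules shown earlier rather than simple value equality.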
- #[allow(clippy::too_many_arguments)] pub async fn get_payload( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, + payload_parameters: PayloadParameters<'_>, builder_params: BuilderParams, - current_fork: ForkName, spec: &ChainSpec, builder_boost_factor: Option, block_production_version: BlockProductionVersion, @@ -833,11 +884,8 @@ impl ExecutionLayer { let payload_result_type = match block_production_version { BlockProductionVersion::V3 => match self .determine_and_fetch_payload( - parent_hash, - payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - current_fork, builder_boost_factor, spec, ) @@ -857,25 +905,11 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metrics::GET_BLINDED_PAYLOAD], ); - self.determine_and_fetch_payload( - parent_hash, - payload_attributes, - forkchoice_update_params, - builder_params, - current_fork, - None, - spec, - ) - .await? + self.determine_and_fetch_payload(payload_parameters, builder_params, None, spec) + .await? } BlockProductionVersion::FullV2 => self - .get_full_payload_with( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - noop, - ) + .get_full_payload_with(payload_parameters, noop) .await .and_then(GetPayloadResponseType::try_into) .map(ProvenancedPayload::Local)?, @@ -922,17 +956,15 @@ impl ExecutionLayer { async fn fetch_builder_and_local_payloads( &self, builder: &BuilderHttpClient, - parent_hash: ExecutionBlockHash, builder_params: &BuilderParams, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, - current_fork: ForkName, + payload_parameters: PayloadParameters<'_>, ) -> ( Result>>, builder_client::Error>, Result, Error>, ) { let slot = builder_params.slot; let pubkey = &builder_params.pubkey; + let parent_hash = payload_parameters.parent_hash; info!( self.log(), @@ -950,17 +982,12 @@ impl ExecutionLayer { .await }), timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { - self.get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) - .await - .and_then(|local_result_type| match local_result_type { - GetPayloadResponseType::Full(payload) => Ok(payload), - GetPayloadResponseType::Blinded(_) => Err(Error::PayloadTypeMismatch), - }) + self.get_full_payload_caching(payload_parameters) + .await + .and_then(|local_result_type| match local_result_type { + GetPayloadResponseType::Full(payload) => Ok(payload), + GetPayloadResponseType::Blinded(_) => Err(Error::PayloadTypeMismatch), + }) }) ); @@ -984,26 +1011,17 @@ impl ExecutionLayer { (relay_result, local_result) } - #[allow(clippy::too_many_arguments)] async fn determine_and_fetch_payload( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, + payload_parameters: PayloadParameters<'_>, builder_params: BuilderParams, - current_fork: ForkName, builder_boost_factor: Option, spec: &ChainSpec, ) -> Result>, Error> { let Some(builder) = self.builder() else { // no builder.. 
            // no builder.. return local payload
             return self
-                .get_full_payload_caching(
-                    parent_hash,
-                    payload_attributes,
-                    forkchoice_update_params,
-                    current_fork,
-                )
+                .get_full_payload_caching(payload_parameters)
                 .await
                 .and_then(GetPayloadResponseType::try_into)
                 .map(ProvenancedPayload::Local);
@@ -1034,26 +1052,15 @@
                 ),
             }
             return self
-                .get_full_payload_caching(
-                    parent_hash,
-                    payload_attributes,
-                    forkchoice_update_params,
-                    current_fork,
-                )
+                .get_full_payload_caching(payload_parameters)
                 .await
                 .and_then(GetPayloadResponseType::try_into)
                 .map(ProvenancedPayload::Local);
         }
 
+        let parent_hash = payload_parameters.parent_hash;
         let (relay_result, local_result) = self
-            .fetch_builder_and_local_payloads(
-                builder.as_ref(),
-                parent_hash,
-                &builder_params,
-                payload_attributes,
-                forkchoice_update_params,
-                current_fork,
-            )
+            .fetch_builder_and_local_payloads(builder.as_ref(), &builder_params, payload_parameters)
             .await;
 
         match (relay_result, local_result) {
@@ -1118,14 +1125,9 @@
                 );
 
                 // check relay payload validity
-                if let Err(reason) = verify_builder_bid(
-                    &relay,
-                    parent_hash,
-                    payload_attributes,
-                    Some(local.block_number()),
-                    current_fork,
-                    spec,
-                ) {
+                if let Err(reason) =
+                    verify_builder_bid(&relay, payload_parameters, Some(local.block_number()), spec)
+                {
                     // relay payload invalid -> return local
                     metrics::inc_counter_vec(
                         &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS,
@@ -1202,14 +1204,7 @@
                     "parent_hash" => ?parent_hash,
                 );
 
-                match verify_builder_bid(
-                    &relay,
-                    parent_hash,
-                    payload_attributes,
-                    None,
-                    current_fork,
-                    spec,
-                ) {
+                match verify_builder_bid(&relay, payload_parameters, None, spec) {
                     Ok(()) => Ok(ProvenancedPayload::try_from(relay.data.message)?),
                     Err(reason) => {
                         metrics::inc_counter_vec(
@@ -1234,32 +1229,28 @@
     /// Get a full payload and cache its result in the execution layer's payload cache.
     async fn get_full_payload_caching(
         &self,
-        parent_hash: ExecutionBlockHash,
-        payload_attributes: &PayloadAttributes,
-        forkchoice_update_params: ForkchoiceUpdateParameters,
-        current_fork: ForkName,
+        payload_parameters: PayloadParameters<'_>,
     ) -> Result<GetPayloadResponseType<E>, Error> {
-        self.get_full_payload_with(
-            parent_hash,
-            payload_attributes,
-            forkchoice_update_params,
-            current_fork,
-            Self::cache_payload,
-        )
-        .await
+        self.get_full_payload_with(payload_parameters, Self::cache_payload)
+            .await
     }
 
     async fn get_full_payload_with(
         &self,
-        parent_hash: ExecutionBlockHash,
-        payload_attributes: &PayloadAttributes,
-        forkchoice_update_params: ForkchoiceUpdateParameters,
-        current_fork: ForkName,
+        payload_parameters: PayloadParameters<'_>,
         cache_fn: fn(
             &ExecutionLayer<E>,
             PayloadContentsRefTuple<E>,
         ) -> Option<FullPayloadContents<E>>,
     ) -> Result<GetPayloadResponseType<E>, Error> {
+        let PayloadParameters {
+            parent_hash,
+            payload_attributes,
+            forkchoice_update_params,
+            current_fork,
+            ..
+        } = payload_parameters;
+
         self.engine()
             .request(move |engine| async move {
                 let payload_id = if let Some(id) = engine
@@ -1984,6 +1975,10 @@ enum InvalidBuilderPayload {
         payload: Option<Hash256>,
         expected: Option<Hash256>,
     },
+    GasLimitMismatch {
+        payload: u64,
+        expected: u64,
+    },
 }
 
 impl fmt::Display for InvalidBuilderPayload {
@@ -2022,19 +2017,51 @@
                     opt_string(expected)
                 )
             }
+            InvalidBuilderPayload::GasLimitMismatch { payload, expected } => {
+                write!(f, "payload gas limit was {} not {}", payload, expected)
+            }
         }
     }
 }
 
+/// Calculate the expected gas limit for a block.
+pub fn expected_gas_limit(
+    parent_gas_limit: u64,
+    target_gas_limit: u64,
+    spec: &ChainSpec,
+) -> Option<u64> {
+    // Calculate the maximum gas limit difference allowed safely
+    let max_gas_limit_difference = parent_gas_limit
+        .checked_div(spec.gas_limit_adjustment_factor)
+        .and_then(|result| result.checked_sub(1))
+        .unwrap_or(0);
+
+    // Adjust the gas limit safely
+    if target_gas_limit > parent_gas_limit {
+        let gas_diff = target_gas_limit.saturating_sub(parent_gas_limit);
+        parent_gas_limit.checked_add(std::cmp::min(gas_diff, max_gas_limit_difference))
+    } else {
+        let gas_diff = parent_gas_limit.saturating_sub(target_gas_limit);
+        parent_gas_limit.checked_sub(std::cmp::min(gas_diff, max_gas_limit_difference))
+    }
+}
+
 /// Perform some cursory, non-exhaustive validation of the bid returned from the builder.
 fn verify_builder_bid<E: EthSpec>(
     bid: &ForkVersionedResponse<SignedBuilderBid<E>>,
-    parent_hash: ExecutionBlockHash,
-    payload_attributes: &PayloadAttributes,
+    payload_parameters: PayloadParameters<'_>,
     block_number: Option<u64>,
-    current_fork: ForkName,
     spec: &ChainSpec,
 ) -> Result<(), Box<InvalidBuilderPayload>> {
+    let PayloadParameters {
+        parent_hash,
+        payload_attributes,
+        current_fork,
+        parent_gas_limit,
+        proposer_gas_limit,
+        ..
+    } = payload_parameters;
+
     let is_signature_valid = bid.data.verify_signature(spec);
     let header = &bid.data.message.header();
 
@@ -2050,6 +2077,8 @@
         .cloned()
         .map(|withdrawals| Withdrawals::<E>::from(withdrawals).tree_hash_root());
     let payload_withdrawals_root = header.withdrawals_root().ok();
+    let expected_gas_limit = proposer_gas_limit
+        .and_then(|target_gas_limit| expected_gas_limit(parent_gas_limit, target_gas_limit, spec));
 
     if header.parent_hash() != parent_hash {
         Err(Box::new(InvalidBuilderPayload::ParentHash {
@@ -2086,6 +2115,14 @@
             payload: payload_withdrawals_root,
             expected: expected_withdrawals_root,
         }))
+    } else if expected_gas_limit
+        .map(|gas_limit| header.gas_limit() != gas_limit)
+        .unwrap_or(false)
+    {
+        Err(Box::new(InvalidBuilderPayload::GasLimitMismatch {
+            payload: header.gas_limit(),
+            expected: expected_gas_limit.unwrap_or(0),
+        }))
     } else {
         Ok(())
     }
 }
@@ -2138,6 +2175,27 @@ mod test {
             .await;
     }
 
+    #[tokio::test]
+    async fn test_expected_gas_limit() {
+        let spec = ChainSpec::mainnet();
+        assert_eq!(
+            expected_gas_limit(30_000_000, 30_000_000, &spec),
+            Some(30_000_000)
+        );
+        assert_eq!(
+            expected_gas_limit(30_000_000, 40_000_000, &spec),
+            Some(30_029_295)
+        );
+        assert_eq!(
+            expected_gas_limit(30_029_295, 40_000_000, &spec),
+            Some(30_058_619)
+        );
+        assert_eq!(
+            expected_gas_limit(30_058_619, 30_000_000, &spec),
+            Some(30_029_266)
+        );
+    }
+
     #[tokio::test]
     async fn test_forked_terminal_block() {
         let runtime = TestRuntime::default();
diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
index 4deb91e0567..4fab7150ce3 100644
--- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
+++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs
@@ -28,8 +28,8 @@ use super::DEFAULT_TERMINAL_BLOCK;
 
 const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz");
 
-const GAS_LIMIT: u64 = 16384;
-const GAS_USED: u64 = GAS_LIMIT - 1;
+pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000;
+const GAS_USED: u64 = DEFAULT_GAS_LIMIT - 1;
 
 #[derive(Clone, Debug, PartialEq)]
 #[allow(clippy::large_enum_variant)] // This struct is only for testing.
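
[Reviewer aside, not part of the diff] The `expected_gas_limit` helper above encodes the execution-layer rule that a block's gas limit may move toward the proposer's target by at most `parent_gas_limit / gas_limit_adjustment_factor - 1` per block; the adjustment factor is 1024 on mainnet, which is what the test vectors assume. A minimal self-contained sketch of the same bound, reproducing the first mutated test vector:

    // Sketch only: `ADJUSTMENT_FACTOR` stands in for
    // `spec.gas_limit_adjustment_factor` (1024 on mainnet).
    const ADJUSTMENT_FACTOR: u64 = 1024;

    fn bounded_gas_limit(parent: u64, target: u64) -> u64 {
        // The limit may move by at most parent / 1024 - 1 in a single block.
        let max_step = (parent / ADJUSTMENT_FACTOR).saturating_sub(1);
        if target > parent {
            parent + (target - parent).min(max_step)
        } else {
            parent - (parent - target).min(max_step)
        }
    }

    fn main() {
        // 30_000_000 / 1024 - 1 = 29_295, so one block can only move the limit
        // from 30_000_000 toward 40_000_000 by 29_295, matching
        // `test_expected_gas_limit` above.
        assert_eq!(bounded_gas_limit(30_000_000, 40_000_000), 30_029_295);
    }

Unlike the real helper, this sketch returns a plain `u64` rather than guarding every step with checked arithmetic.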
@@ -38,6 +38,10 @@ pub enum Block<E: EthSpec> {
     PoS(ExecutionPayload<E>),
 }
 
+pub fn mock_el_extra_data<E: EthSpec>() -> types::VariableList<u8, E::MaxExtraDataBytes> {
+    "block gen was here".as_bytes().to_vec().into()
+}
+
 impl<E: EthSpec> Block<E> {
     pub fn block_number(&self) -> u64 {
         match self {
@@ -67,6 +71,13 @@
         }
     }
 
+    pub fn gas_limit(&self) -> u64 {
+        match self {
+            Block::PoW(_) => DEFAULT_GAS_LIMIT,
+            Block::PoS(payload) => payload.gas_limit(),
+        }
+    }
+
     pub fn as_execution_block(&self, total_difficulty: Uint256) -> ExecutionBlock {
         match self {
             Block::PoW(block) => ExecutionBlock {
@@ -570,10 +581,10 @@
                 logs_bloom: vec![0; 256].into(),
                 prev_randao: pa.prev_randao,
                 block_number: parent.block_number() + 1,
-                gas_limit: GAS_LIMIT,
+                gas_limit: DEFAULT_GAS_LIMIT,
                 gas_used: GAS_USED,
                 timestamp: pa.timestamp,
-                extra_data: "block gen was here".as_bytes().to_vec().into(),
+                extra_data: mock_el_extra_data::<E>(),
                 base_fee_per_gas: Uint256::from(1u64),
                 block_hash: ExecutionBlockHash::zero(),
                 transactions: vec![].into(),
@@ -587,10 +598,10 @@
                 logs_bloom: vec![0; 256].into(),
                 prev_randao: pa.prev_randao,
                 block_number: parent.block_number() + 1,
-                gas_limit: GAS_LIMIT,
+                gas_limit: DEFAULT_GAS_LIMIT,
                 gas_used: GAS_USED,
                 timestamp: pa.timestamp,
-                extra_data: "block gen was here".as_bytes().to_vec().into(),
+                extra_data: mock_el_extra_data::<E>(),
                 base_fee_per_gas: Uint256::from(1u64),
                 block_hash: ExecutionBlockHash::zero(),
                 transactions: vec![].into(),
@@ -603,10 +614,10 @@
                 logs_bloom: vec![0; 256].into(),
                 prev_randao: pa.prev_randao,
                 block_number: parent.block_number() + 1,
-                gas_limit: GAS_LIMIT,
+                gas_limit: DEFAULT_GAS_LIMIT,
                 gas_used: GAS_USED,
                 timestamp: pa.timestamp,
-                extra_data: "block gen was here".as_bytes().to_vec().into(),
+                extra_data: mock_el_extra_data::<E>(),
                 base_fee_per_gas: Uint256::from(1u64),
                 block_hash: ExecutionBlockHash::zero(),
                 transactions: vec![].into(),
@@ -623,10 +634,10 @@
                 logs_bloom: vec![0; 256].into(),
                 prev_randao: pa.prev_randao,
                 block_number: parent.block_number() + 1,
-                gas_limit: GAS_LIMIT,
+                gas_limit: DEFAULT_GAS_LIMIT,
                 gas_used: GAS_USED,
                 timestamp: pa.timestamp,
-                extra_data: "block gen was here".as_bytes().to_vec().into(),
+                extra_data: mock_el_extra_data::<E>(),
                 base_fee_per_gas: Uint256::from(1u64),
                 block_hash: ExecutionBlockHash::zero(),
                 transactions: vec![].into(),
@@ -642,10 +653,10 @@
                 logs_bloom: vec![0; 256].into(),
                 prev_randao: pa.prev_randao,
                 block_number: parent.block_number() + 1,
-                gas_limit: GAS_LIMIT,
+                gas_limit: DEFAULT_GAS_LIMIT,
                 gas_used: GAS_USED,
                 timestamp: pa.timestamp,
-                extra_data: "block gen was here".as_bytes().to_vec().into(),
+                extra_data: mock_el_extra_data::<E>(),
                 base_fee_per_gas: Uint256::from(1u64),
                 block_hash: ExecutionBlockHash::zero(),
                 transactions: vec![].into(),
diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs
index 786ac9ad9c9..9365024ffb7 100644
--- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs
+++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs
@@ -374,7 +374,7 @@ pub async fn handle_rpc<E: EthSpec>(
                 .into(),
                 should_override_builder: false,
                 // TODO(electra): add EL requests in mock el
-                requests: Default::default(),
+                execution_requests: Default::default(),
             })
             .unwrap()
         }
diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs
index 341daedbc8d..879b54eb075 100644
--- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs
+++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs
@@ -1,5 +1,5 @@
 use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET};
-use crate::{Config, ExecutionLayer, PayloadAttributes};
+use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadParameters};
 use eth2::types::{BlobsBundle, BlockId, StateId, ValidatorId};
 use eth2::{BeaconNodeHttpClient, Timeouts, CONSENSUS_VERSION_HEADER};
 use fork_choice::ForkchoiceUpdateParameters;
@@ -54,6 +54,10 @@ impl Operation {
     }
 }
 
+pub fn mock_builder_extra_data<E: EthSpec>() -> types::VariableList<u8, E::MaxExtraDataBytes> {
+    "mock_builder".as_bytes().to_vec().into()
+}
+
 #[derive(Debug)]
 // We don't use the string value directly, but it's used in the Debug impl which is required by `warp::reject::Reject`.
 struct Custom(#[allow(dead_code)] String);
@@ -72,6 +76,8 @@ pub trait BidStuff<E: EthSpec> {
     fn set_withdrawals_root(&mut self, withdrawals_root: Hash256);
 
     fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature;
+
+    fn stamp_payload(&mut self);
 }
 
 impl<E: EthSpec> BidStuff<E> for BuilderBid<E> {
@@ -203,6 +209,29 @@
         let message = self.signing_root(domain);
         sk.sign(message)
     }
+
+    // this helps differentiate a builder block from a regular block
+    fn stamp_payload(&mut self) {
+        let extra_data = mock_builder_extra_data::<E>();
+        match self.to_mut().header_mut() {
+            ExecutionPayloadHeaderRefMut::Bellatrix(header) => {
+                header.extra_data = extra_data;
+                header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root());
+            }
+            ExecutionPayloadHeaderRefMut::Capella(header) => {
+                header.extra_data = extra_data;
+                header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root());
+            }
+            ExecutionPayloadHeaderRefMut::Deneb(header) => {
+                header.extra_data = extra_data;
+                header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root());
+            }
+            ExecutionPayloadHeaderRefMut::Electra(header) => {
+                header.extra_data = extra_data;
+                header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root());
+            }
+        }
+    }
 }
 
 #[derive(Clone)]
@@ -286,6 +315,7 @@ impl<E: EthSpec> MockBuilder<E> {
         while let Some(op) = guard.pop() {
             op.apply(bid);
         }
+        bid.stamp_payload();
     }
 }
 
@@ -413,11 +443,12 @@ pub fn serve<E: EthSpec>(
             let block = head.data.message();
             let head_block_root = block.tree_hash_root();
 
-            let head_execution_hash = block
+            let head_execution_payload = block
                 .body()
                 .execution_payload()
-                .map_err(|_| reject("pre-merge block"))?
- .block_hash(); + .map_err(|_| reject("pre-merge block"))?; + let head_execution_hash = head_execution_payload.block_hash(); + let head_gas_limit = head_execution_payload.gas_limit(); if head_execution_hash != parent_hash { return Err(reject("head mismatch")); } @@ -529,14 +560,24 @@ pub fn serve( finalized_hash: Some(finalized_execution_hash), }; + let proposer_gas_limit = builder + .val_registration_cache + .read() + .get(&pubkey) + .map(|v| v.message.gas_limit); + + let payload_parameters = PayloadParameters { + parent_hash: head_execution_hash, + parent_gas_limit: head_gas_limit, + proposer_gas_limit, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: fork, + }; + let payload_response_type = builder .el - .get_full_payload_caching( - head_execution_hash, - &payload_attributes, - forkchoice_update_params, - fork, - ) + .get_full_payload_caching(payload_parameters) .await .map_err(|_| reject("couldn't get payload"))?; @@ -648,8 +689,6 @@ pub fn serve( } }; - message.set_gas_limit(cached_data.gas_limit); - builder.apply_operations(&mut message); let mut signature = diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index a9f1313e462..48372a39be1 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -90,6 +90,7 @@ impl MockExecutionLayer { }; let parent_hash = latest_execution_block.block_hash(); + let parent_gas_limit = latest_execution_block.gas_limit(); let block_number = latest_execution_block.block_number() + 1; let timestamp = block_number; let prev_randao = Hash256::from_low_u64_be(block_number); @@ -131,14 +132,20 @@ impl MockExecutionLayer { let payload_attributes = PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: ForkName::Bellatrix, + }; + let block_proposal_content_type = self .el .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - ForkName::Bellatrix, &self.spec, None, BlockProductionVersion::FullV2, @@ -171,14 +178,20 @@ impl MockExecutionLayer { let payload_attributes = PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: ForkName::Bellatrix, + }; + let block_proposal_content_type = self .el .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - ForkName::Bellatrix, &self.spec, None, BlockProductionVersion::BlindedV2, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 1e71fde2551..faf6d4ef0b6 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -25,12 +25,13 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; +pub use execution_block_generator::DEFAULT_GAS_LIMIT; 
pub use execution_block_generator::{ generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block, - static_valid_tx, Block, ExecutionBlockGenerator, + mock_el_extra_data, static_valid_tx, Block, ExecutionBlockGenerator, }; pub use hook::Hook; -pub use mock_builder::{MockBuilder, Operation}; +pub use mock_builder::{mock_builder_extra_data, MockBuilder, Operation}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 638fe0f2192..5d601008bc0 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -6,49 +6,49 @@ edition = { workspace = true } autotests = false # using a single test binary compiles faster [dependencies] -warp = { workspace = true } -serde = { workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } -types = { workspace = true } -hex = { workspace = true } beacon_chain = { workspace = true } +beacon_processor = { workspace = true } +bs58 = "0.4.0" +bytes = { workspace = true } +directory = { workspace = true } +eth1 = { workspace = true } eth2 = { workspace = true } -slog = { workspace = true } -network = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } +execution_layer = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } lighthouse_network = { workspace = true } -eth1 = { workspace = true } -state_processing = { workspace = true } lighthouse_version = { workspace = true } +logging = { workspace = true } +lru = { workspace = true } metrics = { workspace = true } -warp_utils = { workspace = true } -slot_clock = { workspace = true } -ethereum_ssz = { workspace = true } -bs58 = "0.4.0" -futures = { workspace = true } -execution_layer = { workspace = true } +network = { workspace = true } +operation_pool = { workspace = true } parking_lot = { workspace = true } +rand = { workspace = true } safe_arith = { workspace = true } -task_executor = { workspace = true } -lru = { workspace = true } -tree_hash = { workspace = true } -sysinfo = { workspace = true } -system_health = { path = "../../common/system_health" } -directory = { workspace = true } -logging = { workspace = true } -ethereum_serde_utils = { workspace = true } -operation_pool = { workspace = true } sensitive_url = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +state_processing = { workspace = true } store = { workspace = true } -bytes = { workspace = true } -beacon_processor = { workspace = true } -rand = { workspace = true } +sysinfo = { workspace = true } +system_health = { path = "../../common/system_health" } +task_executor = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +warp = { workspace = true } +warp_utils = { workspace = true } [dev-dependencies] -serde_json = { workspace = true } -proto_array = { workspace = true } genesis = { workspace = true } logging = { workspace = true } +proto_array = { workspace = true } +serde_json = { workspace = true } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index dba8eb1ef32..b9e48833184 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,4 +1,5 @@ use 
crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic}; +use beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlobIndicesQuery; use eth2::types::BlockId as CoreBlockId; @@ -9,6 +10,7 @@ use types::{ BlobSidecarList, EthSpec, FixedBytesExtended, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, }; +use warp::Rejection; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. @@ -261,7 +263,7 @@ impl BlockId { #[allow(clippy::type_complexity)] pub fn get_blinded_block_and_blob_list_filtered( &self, - indices: BlobIndicesQuery, + query: BlobIndicesQuery, chain: &BeaconChain, ) -> Result< ( @@ -286,20 +288,32 @@ impl BlockId { // Return the `BlobSidecarList` identified by `self`. let blob_sidecar_list = if !blob_kzg_commitments.is_empty() { - chain - .store - .get_blobs(&root) - .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "no blobs stored for block {root}" - )) - })? + if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + Self::get_blobs_from_data_columns(chain, root, query.indices, &block)? + } else { + Self::get_blobs(chain, root, query.indices)? + } } else { BlobSidecarList::default() }; - let blob_sidecar_list_filtered = match indices.indices { + Ok((block, blob_sidecar_list, execution_optimistic, finalized)) + } + + fn get_blobs( + chain: &BeaconChain, + root: Hash256, + indices: Option>, + ) -> Result, Rejection> { + let blob_sidecar_list = chain + .store + .get_blobs(&root) + .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("no blobs stored for block {root}")) + })?; + + let blob_sidecar_list_filtered = match indices { Some(vec) => { let list = blob_sidecar_list .into_iter() @@ -310,12 +324,48 @@ impl BlockId { } None => blob_sidecar_list, }; - Ok(( - block, - blob_sidecar_list_filtered, - execution_optimistic, - finalized, - )) + + Ok(blob_sidecar_list_filtered) + } + + fn get_blobs_from_data_columns( + chain: &BeaconChain, + root: Hash256, + blob_indices: Option>, + block: &SignedBlindedBeaconBlock<::EthSpec>, + ) -> Result, Rejection> { + let column_indices = chain.store.get_data_column_keys(root).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error fetching data columns keys: {e:?}" + )) + })?; + + let num_found_column_keys = column_indices.len(); + let num_required_columns = chain.spec.number_of_columns / 2; + let is_blob_available = num_found_column_keys >= num_required_columns; + + if is_blob_available { + let data_columns = column_indices + .into_iter() + .filter_map( + |column_index| match chain.get_data_column(&root, &column_index) { + Ok(Some(data_column)) => Some(Ok(data_column)), + Ok(None) => None, + Err(e) => Some(Err(warp_utils::reject::beacon_chain_error(e))), + }, + ) + .collect::, _>>()?; + + reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error reconstructing data columns: {e:?}" + )) + }) + } else { + Err(warp_utils::reject::custom_server_error( + format!("Insufficient data columns to reconstruct blobs: required {num_required_columns}, but only {num_found_column_keys} were found.") + )) + } } } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 
fe05f55a01a..23d177da785 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -3704,7 +3704,10 @@
                     );
 
                     execution_layer
-                        .update_proposer_preparation(current_epoch, &preparation_data)
+                        .update_proposer_preparation(
+                            current_epoch,
+                            preparation_data.iter().map(|data| (data, &None)),
+                        )
                         .await;
 
                     chain
@@ -3762,7 +3765,7 @@
                     let spec = &chain.spec;
 
                     let (preparation_data, filtered_registration_data): (
-                        Vec<ProposerPreparationData>,
+                        Vec<(ProposerPreparationData, Option<u64>)>,
                         Vec<SignedValidatorRegistrationData>,
                     ) = register_val_data
                         .into_iter()
@@ -3792,12 +3795,15 @@
                                 // Filter out validators who are not 'active' or 'pending'.
                                 is_active_or_pending.then_some({
                                     (
-                                        ProposerPreparationData {
-                                            validator_index: validator_index as u64,
-                                            fee_recipient: register_data
-                                                .message
-                                                .fee_recipient,
-                                        },
+                                        (
+                                            ProposerPreparationData {
+                                                validator_index: validator_index as u64,
+                                                fee_recipient: register_data
+                                                    .message
+                                                    .fee_recipient,
+                                            },
+                                            Some(register_data.message.gas_limit),
+                                        ),
                                         register_data,
                                     )
                                 })
@@ -3807,7 +3813,10 @@
 
                     // Update the prepare beacon proposer cache based on this request.
                     execution_layer
-                        .update_proposer_preparation(current_epoch, &preparation_data)
+                        .update_proposer_preparation(
+                            current_epoch,
+                            preparation_data.iter().map(|(data, limit)| (data, limit)),
+                        )
                         .await;
 
                     // Call prepare beacon proposer blocking with the latest update in order to make
diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs
index 1338f4f1802..e1ecf2d4fc3 100644
--- a/beacon_node/http_api/tests/broadcast_validation_tests.rs
+++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs
@@ -322,7 +322,7 @@ pub async fn consensus_gossip() {
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 pub async fn consensus_partial_pass_only_consensus() {
     /* this test targets gossip-level validation */
-    let validation_level: Option<BroadcastValidation> = Some(BroadcastValidation::Consensus);
+    let validation_level = BroadcastValidation::Consensus;
 
     // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing
     // `validator_count // 32`.
@@ -378,7 +378,7 @@
         tester.harness.chain.clone(),
         &channel.0,
         test_logger,
-        validation_level.unwrap(),
+        validation_level,
         StatusCode::ACCEPTED,
         network_globals,
     )
@@ -615,8 +615,7 @@ pub async fn equivocation_gossip() {
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 pub async fn equivocation_consensus_late_equivocation() {
     /* this test targets gossip-level validation */
-    let validation_level: Option<BroadcastValidation> =
-        Some(BroadcastValidation::ConsensusAndEquivocation);
+    let validation_level = BroadcastValidation::ConsensusAndEquivocation;
 
     // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing
     // `validator_count // 32`.
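
[Reviewer aside, not part of the diff] Both `update_proposer_preparation` call sites in the `http_api` hunks above now pass an iterator of `(&ProposerPreparationData, &Option<u64>)` pairs instead of a slice. A minimal self-contained sketch of that calling pattern (the types here are simplified stand-ins, not Lighthouse's):

    #[derive(Debug)]
    struct PreparationData {
        validator_index: u64,
    }

    fn update<'a, I>(proposer_data: I)
    where
        I: IntoIterator<Item = (&'a PreparationData, &'a Option<u64>)>,
    {
        for (data, gas_limit) in proposer_data {
            println!("validator {} -> gas limit {:?}", data.validator_index, gas_limit);
        }
    }

    fn main() {
        // prepare_beacon_proposer shape: no gas limit known, so a shared `&None`
        // is paired with every entry (`&None` is promoted to a 'static borrow).
        let prep = vec![PreparationData { validator_index: 0 }];
        update(prep.iter().map(|d| (d, &None)));

        // register_validator shape: per-validator limits carried in tuples.
        let with_limits = vec![(PreparationData { validator_index: 1 }, Some(36_000_000u64))];
        update(with_limits.iter().map(|(d, l)| (d, l)));
    }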
@@ -671,7 +670,7 @@ pub async fn equivocation_consensus_late_equivocation() { tester.harness.chain, &channel.0, test_logger, - validation_level.unwrap(), + validation_level, StatusCode::ACCEPTED, network_globals, ) @@ -1228,8 +1227,7 @@ pub async fn blinded_equivocation_gossip() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn blinded_equivocation_consensus_late_equivocation() { /* this test targets gossip-level validation */ - let validation_level: Option = - Some(BroadcastValidation::ConsensusAndEquivocation); + let validation_level = BroadcastValidation::ConsensusAndEquivocation; // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. @@ -1311,7 +1309,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { tester.harness.chain, &channel.0, test_logger, - validation_level.unwrap(), + validation_level, StatusCode::ACCEPTED, network_globals, ) @@ -1465,8 +1463,8 @@ pub async fn block_seen_on_gossip_with_some_blobs() { "need at least 2 blobs for partial reveal" ); - let partial_kzg_proofs = vec![blobs.0.get(0).unwrap().clone()]; - let partial_blobs = vec![blobs.1.get(0).unwrap().clone()]; + let partial_kzg_proofs = vec![*blobs.0.first().unwrap()]; + let partial_blobs = vec![blobs.1.first().unwrap().clone()]; // Simulate the block being seen on gossip. block diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index c3ed3347821..e45dcf221cc 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -139,7 +139,7 @@ impl ForkChoiceUpdates { fn insert(&mut self, update: ForkChoiceUpdateMetadata) { self.updates .entry(update.state.head_block_hash) - .or_insert_with(Vec::new) + .or_default() .push(update); } @@ -447,9 +447,14 @@ pub async fn proposer_boost_re_org_test( // Send proposer preparation data for all validators. 
let proposer_preparation_data = all_validators .iter() - .map(|i| ProposerPreparationData { - validator_index: *i as u64, - fee_recipient: Address::from_low_u64_be(*i as u64), + .map(|i| { + ( + ProposerPreparationData { + validator_index: *i as u64, + fee_recipient: Address::from_low_u64_be(*i as u64), + }, + None, + ) }) .collect::>(); harness @@ -459,7 +464,7 @@ pub async fn proposer_boost_re_org_test( .unwrap() .update_proposer_preparation( head_slot.epoch(E::slots_per_epoch()) + 1, - &proposer_preparation_data, + proposer_preparation_data.iter().map(|(a, b)| (a, b)), ) .await; diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index 01731530d36..dd481f23bae 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -57,18 +57,18 @@ async fn el_syncing_then_synced() { mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); // EL synced mock_el.server.set_syncing_response(Ok(false)); mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `syncing` endpoint when the EL is offline (errors on upcheck). @@ -85,9 +85,9 @@ async fn el_offline() { mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, true); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `syncing` endpoint when the EL errors on newPaylod but is not fully offline. @@ -128,9 +128,9 @@ async fn el_error_on_new_payload() { // The EL should now be *offline* according to the API. let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, true); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); // Processing a block successfully should remove the status. mock_el.server.set_new_payload_status( @@ -144,9 +144,9 @@ async fn el_error_on_new_payload() { harness.process_block_result((block, blobs)).await.unwrap(); let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `node health` endpoint when the EL is offline. 
diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 940f3ae9c0c..7007a14466c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -13,8 +13,10 @@ use eth2::{ Error::ServerMessage, StatusCode, Timeouts, }; +use execution_layer::expected_gas_limit; use execution_layer::test_utils::{ - MockBuilder, Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + mock_builder_extra_data, mock_el_extra_data, MockBuilder, Operation, + DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_GAS_LIMIT, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; @@ -272,10 +274,10 @@ impl ApiTester { let mock_builder_server = harness.set_mock_builder(beacon_url.clone()); // Start the mock builder service prior to building the chain out. - harness.runtime.task_executor.spawn( - async move { mock_builder_server.await }, - "mock_builder_server", - ); + harness + .runtime + .task_executor + .spawn(mock_builder_server, "mock_builder_server"); let mock_builder = harness.mock_builder.clone(); @@ -348,7 +350,6 @@ impl ApiTester { let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero()); let chain = harness.chain.clone(); - let log = test_logger(); let ApiServer { @@ -640,7 +641,7 @@ impl ApiTester { self } - pub async fn test_beacon_blocks_finalized(self) -> Self { + pub async fn test_beacon_blocks_finalized(self) -> Self { for block_id in self.interesting_block_ids() { let block_root = block_id.root(&self.chain); let block = block_id.full_block(&self.chain).await; @@ -677,7 +678,7 @@ impl ApiTester { self } - pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { + pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { for block_id in self.interesting_block_ids() { let block_root = block_id.root(&self.chain); let block = block_id.full_block(&self.chain).await; @@ -818,7 +819,7 @@ impl ApiTester { let validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let unsupported_media_response = self @@ -858,7 +859,7 @@ impl ApiTester { let validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let validator_pubkey_ids = validator_indices .iter() @@ -909,7 +910,7 @@ impl ApiTester { for i in validator_indices { if i < state.balances().len() as u64 { validators.push(ValidatorBalanceData { - index: i as u64, + index: i, balance: *state.balances().get(i as usize).unwrap(), }); } @@ -943,7 +944,7 @@ impl ApiTester { let validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let validator_pubkey_ids = validator_indices .iter() @@ -1011,7 +1012,7 @@ impl ApiTester { || statuses.contains(&status.superstatus()) { validators.push(ValidatorData { - index: i as u64, + index: i, balance: *state.balances().get(i as usize).unwrap(), status, validator, @@ -1640,11 +1641,7 @@ impl ApiTester { let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); let num_blobs = block.num_expected_blobs(); let blob_indices = if use_indices { - Some( - (0..num_blobs.saturating_sub(1) as u64) - .into_iter() - .collect::>(), - ) + Some((0..num_blobs.saturating_sub(1) as u64).collect::>()) } else { None }; @@ -1662,7 +1659,7 @@ impl ApiTester { blob_indices.map_or(num_blobs, |indices| indices.len()) ); let expected = 
            block.slot();
-        assert_eq!(result.get(0).unwrap().slot(), expected);
+        assert_eq!(result.first().unwrap().slot(), expected);
 
         self
     }
@@ -1700,9 +1697,9 @@
                 break;
             }
         }
-        let test_slot = test_slot.expect(&format!(
-            "should be able to find a block matching zero_blobs={zero_blobs}"
-        ));
+        let test_slot = test_slot.unwrap_or_else(|| {
+            panic!("should be able to find a block matching zero_blobs={zero_blobs}")
+        });
 
         match self
             .client
@@ -1771,7 +1768,6 @@
                     .attestations()
                     .map(|att| att.clone_as_attestation())
                     .collect::<Vec<_>>()
-                    .into()
             },
         );
@@ -1908,7 +1904,7 @@
         let result = match self
             .client
-            .get_beacon_light_client_updates::<E>(current_sync_committee_period as u64, 1)
+            .get_beacon_light_client_updates::<E>(current_sync_committee_period, 1)
             .await
         {
             Ok(result) => result,
@@ -1920,7 +1916,7 @@
             .light_client_server_cache
             .get_light_client_updates(
                 &self.chain.store,
-                current_sync_committee_period as u64,
+                current_sync_committee_period,
                 1,
                 &self.chain.spec,
             )
@@ -2313,7 +2309,7 @@
             .unwrap()
             .data
             .is_syncing;
-        assert_eq!(is_syncing, true);
+        assert!(is_syncing);
 
         // Reset sync state.
         *self
@@ -2363,7 +2359,7 @@
     pub async fn test_get_node_peers_by_id(self) -> Self {
         let result = self
            .client
-            .get_node_peers_by_id(self.external_peer_id.clone())
+            .get_node_peers_by_id(self.external_peer_id)
             .await
             .unwrap()
             .data;
@@ -3513,6 +3509,7 @@
         self
     }
 
+    #[allow(clippy::await_holding_lock)] // This is a test, so it should be fine.
     pub async fn test_get_validator_aggregate_attestation(self) -> Self {
         if self
             .chain
@@ -3755,7 +3752,11 @@
         self
     }
 
-    pub async fn test_post_validator_register_validator(self) -> Self {
+    async fn generate_validator_registration_data(
+        &self,
+        fee_recipient_generator: impl Fn(usize) -> Address,
+        gas_limit: u64,
+    ) -> (Vec<SignedValidatorRegistrationData>, Vec<Address>) {
        let mut registrations = vec![];
         let mut fee_recipients = vec![];
 
@@ -3766,15 +3767,13 @@
             epoch: genesis_epoch,
         };
 
-        let expected_gas_limit = 11_111_111;
-
         for (val_index, keypair) in self.validator_keypairs().iter().enumerate() {
             let pubkey = keypair.pk.compress();
-            let fee_recipient = Address::from_low_u64_be(val_index as u64);
+            let fee_recipient = fee_recipient_generator(val_index);
 
             let data = ValidatorRegistrationData {
                 fee_recipient,
-                gas_limit: expected_gas_limit,
+                gas_limit,
                 timestamp: 0,
                 pubkey,
             };
@@ -3797,6 +3796,17 @@
             registrations.push(signed);
         }
 
+        (registrations, fee_recipients)
+    }
+
+    pub async fn test_post_validator_register_validator(self) -> Self {
+        let (registrations, fee_recipients) = self
+            .generate_validator_registration_data(
+                |val_index| Address::from_low_u64_be(val_index as u64),
+                DEFAULT_GAS_LIMIT,
+            )
+            .await;
+
         self.client
             .post_validator_register_validator(&registrations)
             .await
@@ -3811,14 +3821,22 @@
             .zip(fee_recipients.into_iter())
             .enumerate()
         {
-            let actual = self
+            let actual_fee_recipient = self
                 .chain
                 .execution_layer
                 .as_ref()
                 .unwrap()
                 .get_suggested_fee_recipient(val_index as u64)
                 .await;
-            assert_eq!(actual, fee_recipient);
+            let actual_gas_limit = self
+                .chain
+                .execution_layer
+                .as_ref()
+                .unwrap()
+                .get_proposer_gas_limit(val_index as u64)
+                .await;
+            assert_eq!(actual_fee_recipient, fee_recipient);
+            assert_eq!(actual_gas_limit, Some(DEFAULT_GAS_LIMIT));
         }
 
         self
@@ -3839,46 +3857,12 @@
             )
             .await;
 
-        let mut registrations = vec![];
-        let mut fee_recipients = vec![];
-
-        let genesis_epoch = self.chain.spec.genesis_slot.epoch(E::slots_per_epoch());
-        let fork = Fork {
-            current_version: self.chain.spec.genesis_fork_version,
-            previous_version: self.chain.spec.genesis_fork_version,
-            epoch: genesis_epoch,
-        };
-
-        let expected_gas_limit = 11_111_111;
-
-        for (val_index, keypair) in self.validator_keypairs().iter().enumerate() {
-            let pubkey = keypair.pk.compress();
-            let fee_recipient = Address::from_low_u64_be(val_index as u64);
-
-            let data = ValidatorRegistrationData {
-                fee_recipient,
-                gas_limit: expected_gas_limit,
-                timestamp: 0,
-                pubkey,
-            };
-
-            let domain = self.chain.spec.get_domain(
-                genesis_epoch,
-                Domain::ApplicationMask(ApplicationDomain::Builder),
-                &fork,
-                Hash256::zero(),
-            );
-            let message = data.signing_root(domain);
-            let signature = keypair.sk.sign(message);
-
-            let signed = SignedValidatorRegistrationData {
-                message: data,
-                signature,
-            };
-
-            fee_recipients.push(fee_recipient);
-            registrations.push(signed);
-        }
+        let (registrations, fee_recipients) = self
+            .generate_validator_registration_data(
+                |val_index| Address::from_low_u64_be(val_index as u64),
+                DEFAULT_GAS_LIMIT,
+            )
+            .await;
 
         self.client
             .post_validator_register_validator(&registrations)
@@ -3911,6 +3895,47 @@
         self
     }
 
+    pub async fn test_post_validator_register_validator_higher_gas_limit(&self) {
+        let (registrations, fee_recipients) = self
+            .generate_validator_registration_data(
+                |val_index| Address::from_low_u64_be(val_index as u64),
+                DEFAULT_GAS_LIMIT + 10_000_000,
+            )
+            .await;
+
+        self.client
+            .post_validator_register_validator(&registrations)
+            .await
+            .unwrap();
+
+        for (val_index, (_, fee_recipient)) in self
+            .chain
+            .head_snapshot()
+            .beacon_state
+            .validators()
+            .into_iter()
+            .zip(fee_recipients.into_iter())
+            .enumerate()
+        {
+            let actual_fee_recipient = self
+                .chain
+                .execution_layer
+                .as_ref()
+                .unwrap()
+                .get_suggested_fee_recipient(val_index
as u64) + .await; + let actual_gas_limit = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_proposer_gas_limit(val_index as u64) + .await; + assert_eq!(actual_fee_recipient, fee_recipient); + assert_eq!(actual_gas_limit, Some(DEFAULT_GAS_LIMIT + 10_000_000)); + } + } + pub async fn test_post_validator_liveness_epoch(self) -> Self { let epoch = self.chain.epoch().unwrap(); let head_state = self.chain.head_beacon_state_cloned(); @@ -4029,9 +4054,9 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); self } @@ -4056,9 +4081,10 @@ impl ApiTester { ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 16_384); + // This is the graffiti of the mock execution layer, not the builder. + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } @@ -4083,9 +4109,9 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); self } @@ -4107,9 +4133,9 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); // If this cache is empty, it indicates fallback was not used, so the payload came from the // mock builder. @@ -4126,10 +4152,16 @@ impl ApiTester { pub async fn test_payload_accepts_mutated_gas_limit(self) -> Self { // Mutate gas limit. + let builder_limit = expected_gas_limit( + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_LIMIT + 10_000_000, + self.chain.spec.as_ref(), + ) + .expect("calculate expected gas limit"); self.mock_builder .as_ref() .unwrap() - .add_operation(Operation::GasLimit(30_000_000)); + .add_operation(Operation::GasLimit(builder_limit as usize)); let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); @@ -4147,9 +4179,9 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 30_000_000); + assert_eq!(payload.gas_limit(), builder_limit); // This cache should not be populated because fallback should not have been used. 
assert!(self @@ -4159,6 +4191,49 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + + self + } + + pub async fn test_builder_payload_rejected_when_gas_limit_incorrect(self) -> Self { + self.test_post_validator_register_validator_higher_gas_limit() + .await; + + // Mutate gas limit. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::GasLimit(1)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4188,7 +4263,7 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), 30_000_000); @@ -4232,6 +4307,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -4315,6 +4393,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4404,6 +4485,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4491,6 +4575,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4577,6 +4664,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4647,6 +4737,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4707,6 +4800,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4780,6 +4876,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), 
mock_builder_extra_data::()); // Without proposing, advance into the next slot, this should make us cross the threshold // number of skips, causing us to use the fallback. @@ -4809,6 +4907,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } @@ -4915,6 +5015,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); // Fill another epoch with blocks, should be enough to finalize. (Sneaky plus 1 because this // scenario starts at an epoch boundary). @@ -4954,6 +5056,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); self } @@ -5032,9 +5136,8 @@ impl ApiTester { pub async fn test_builder_chain_health_optimistic_head(self) -> Self { // Make sure the next payload verification will return optimistic before advancing the chain. - self.harness.mock_execution_layer.as_ref().map(|el| { + self.harness.mock_execution_layer.as_ref().inspect(|el| { el.server.all_payloads_syncing(true); - el }); self.harness .extend_chain( @@ -5061,7 +5164,7 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); // If this cache is populated, it indicates fallback to the local EE was correctly used. @@ -5072,15 +5175,16 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } pub async fn test_builder_v3_chain_health_optimistic_head(self) -> Self { // Make sure the next payload verification will return optimistic before advancing the chain. 
- self.harness.mock_execution_layer.as_ref().map(|el| { + self.harness.mock_execution_layer.as_ref().inspect(|el| { el.server.all_payloads_syncing(true); - el }); self.harness .extend_chain( @@ -5110,7 +5214,7 @@ impl ApiTester { ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); self @@ -5149,6 +5253,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -5214,6 +5321,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -5279,6 +5389,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -5343,6 +5456,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -5979,16 +6095,17 @@ impl ApiTester { assert_eq!(result.execution_optimistic, Some(false)); // Change head to be optimistic. - self.chain + if let Some(head_node) = self + .chain .canonical_head .fork_choice_write_lock() .proto_array_mut() .core_proto_array_mut() .nodes .last_mut() - .map(|head_node| { - head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) - }); + { + head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) + } // Check responses are now optimistic. let result = self @@ -6021,8 +6138,8 @@ async fn poll_events, eth2::Error>> + Unpin }; tokio::select! 
{ - _ = collect_stream_fut => {events} - _ = tokio::time::sleep(timeout) => { return events; } + _ = collect_stream_fut => { events } + _ = tokio::time::sleep(timeout) => { events } } } @@ -6058,31 +6175,31 @@ async fn test_unsupported_media_response() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_get() { +async fn beacon_get_state_hashes() { ApiTester::new() - .await - .test_beacon_genesis() .await .test_beacon_states_root_finalized() .await - .test_beacon_states_fork_finalized() - .await .test_beacon_states_finality_checkpoints_finalized() .await - .test_beacon_headers_block_id_finalized() + .test_beacon_states_root() + .await + .test_beacon_states_finality_checkpoints() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_state_info() { + ApiTester::new() .await - .test_beacon_blocks_finalized::() + .test_beacon_genesis() .await - .test_beacon_blinded_blocks_finalized::() + .test_beacon_states_fork_finalized() .await .test_debug_beacon_states_finalized() .await - .test_beacon_states_root() - .await .test_beacon_states_fork() .await - .test_beacon_states_finality_checkpoints() - .await .test_beacon_states_validators() .await .test_beacon_states_validator_balances() @@ -6092,6 +6209,18 @@ async fn beacon_get() { .test_beacon_states_validator_id() .await .test_beacon_states_randao() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_blocks() { + ApiTester::new() + .await + .test_beacon_headers_block_id_finalized() + .await + .test_beacon_blocks_finalized() + .await + .test_beacon_blinded_blocks_finalized() .await .test_beacon_headers_all_slots() .await @@ -6106,6 +6235,12 @@ async fn beacon_get() { .test_beacon_blocks_attestations() .await .test_beacon_blocks_root() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_pools() { + ApiTester::new() .await .test_get_beacon_pool_attestations() .await @@ -6682,6 +6817,8 @@ async fn post_validator_register_valid_v3() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_register_gas_limit_mutation() { ApiTester::new_mev_tester() + .await + .test_builder_payload_rejected_when_gas_limit_incorrect() .await .test_payload_accepts_mutated_gas_limit() .await; diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index 97ba72a2ac6..d92f986440c 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -3,24 +3,23 @@ name = "http_metrics" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -warp = { workspace = true } -serde = { workspace = true } -slog = { workspace = true } beacon_chain = { workspace = true } -store = { workspace = true } lighthouse_network = { workspace = true } -slot_clock = { workspace = true } -metrics = { workspace = true } lighthouse_version = { workspace = true } -warp_utils = { workspace = true } malloc_utils = { workspace = true } +metrics = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +store = { workspace = true } +warp = { workspace = true } +warp_utils = { workspace = true } [dev-dependencies] -tokio = { workspace = true } +logging = { workspace = true } reqwest = { workspace = true } +tokio = { workspace = true } types = { workspace = true } -logging = { 
workspace = true } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index c4fad997025..485f32b37a7 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,50 +5,49 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dependencies] -alloy-primitives = { workspace = true} +alloy-primitives = { workspace = true } +alloy-rlp = { workspace = true } +bytes = { workspace = true } +delay_map = { workspace = true } +directory = { workspace = true } +dirs = { workspace = true } discv5 = { workspace = true } -gossipsub = { workspace = true } -unsigned-varint = { version = "0.8", features = ["codec"] } -ssz_types = { workspace = true } -types = { workspace = true } -serde = { workspace = true } +either = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -slog = { workspace = true } -lighthouse_version = { workspace = true } -tokio = { workspace = true } -futures = { workspace = true } -error-chain = { workspace = true } -dirs = { workspace = true } fnv = { workspace = true } -metrics = { workspace = true } -smallvec = { workspace = true } -tokio-io-timeout = "1" +futures = { workspace = true } +gossipsub = { workspace = true } +hex = { workspace = true } +itertools = { workspace = true } +libp2p-mplex = "0.42" +lighthouse_version = { workspace = true } lru = { workspace = true } lru_cache = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } -sha2 = { workspace = true } -snap = { workspace = true } -hex = { workspace = true } -tokio-util = { workspace = true } -tiny-keccak = "2" -task_executor = { workspace = true } +prometheus-client = "0.22.0" rand = { workspace = true } -directory = { workspace = true } regex = { workspace = true } +serde = { workspace = true } +sha2 = { workspace = true } +slog = { workspace = true } +smallvec = { workspace = true } +snap = { workspace = true } +ssz_types = { workspace = true } strum = { workspace = true } superstruct = { workspace = true } -prometheus-client = "0.22.0" +task_executor = { workspace = true } +tiny-keccak = "2" +tokio = { workspace = true } +tokio-io-timeout = "1" +tokio-util = { workspace = true } +types = { workspace = true } +unsigned-varint = { version = "0.8", features = ["codec"] } unused_port = { workspace = true } -delay_map = { workspace = true } -bytes = { workspace = true } -either = { workspace = true } -itertools = { workspace = true } -alloy-rlp = { workspace = true } # Local dependencies void = "1.0.2" -libp2p-mplex = "0.42" [dependencies.libp2p] version = "0.54" @@ -56,13 +55,13 @@ default-features = false features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"] [dev-dependencies] -slog-term = { workspace = true } -slog-async = { workspace = true } -tempfile = { workspace = true } -quickcheck = { workspace = true } -quickcheck_macros = { workspace = true } async-channel = { workspace = true } logging = { workspace = true } +quickcheck = { workspace = true } +quickcheck_macros = { workspace = true } +slog-async = { workspace = true } +slog-term = { workspace = true } +tempfile = { workspace = true } [features] libp2p-websocket = [] diff --git a/beacon_node/lighthouse_network/gossipsub/Cargo.toml b/beacon_node/lighthouse_network/gossipsub/Cargo.toml index a01d60dae99..61f5730c08f 100644 --- a/beacon_node/lighthouse_network/gossipsub/Cargo.toml +++ 
b/beacon_node/lighthouse_network/gossipsub/Cargo.toml @@ -10,7 +10,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [features] -wasm-bindgen = ["getrandom/js"] +wasm-bindgen = ["getrandom/js", "futures-timer/wasm-bindgen"] rsa = [] [dependencies] @@ -22,12 +22,12 @@ bytes = "1.5" either = "1.9" fnv = "1.0.7" futures = "0.3.30" -futures-ticker = "0.0.3" futures-timer = "3.0.2" getrandom = "0.2.12" -hashlink.workspace = true +hashlink = { workspace = true } hex_fmt = "0.3.0" libp2p = { version = "0.54", default-features = false } +prometheus-client = "0.22.0" quick-protobuf = "0.8" quick-protobuf-codec = "0.3" rand = "0.8" @@ -36,7 +36,6 @@ serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" tracing = "0.1.37" void = "1.0.2" -prometheus-client = "0.22.0" web-time = "1.1.0" [dev-dependencies] diff --git a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs index f83a24baafe..537d2319c29 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs @@ -48,8 +48,7 @@ pub(crate) struct BackoffStorage { impl BackoffStorage { fn heartbeats(d: &Duration, heartbeat_interval: &Duration) -> usize { - ((d.as_nanos() + heartbeat_interval.as_nanos() - 1) / heartbeat_interval.as_nanos()) - as usize + d.as_nanos().div_ceil(heartbeat_interval.as_nanos()) as usize } pub(crate) fn new( diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index 5ead0c06a0a..c4e20e43972 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -29,8 +29,7 @@ use std::{ time::Duration, }; -use futures::StreamExt; -use futures_ticker::Ticker; +use futures::FutureExt; use hashlink::LinkedHashMap; use prometheus_client::registry::Registry; use rand::{seq::SliceRandom, thread_rng}; @@ -74,6 +73,7 @@ use super::{ types::RpcOut, }; use super::{PublishError, SubscriptionError, TopicScoreParams, ValidationError}; +use futures_timer::Delay; use quick_protobuf::{MessageWrite, Writer}; use std::{cmp::Ordering::Equal, fmt::Debug}; @@ -301,7 +301,7 @@ pub struct Behaviour { mcache: MessageCache, /// Heartbeat interval stream. - heartbeat: Ticker, + heartbeat: Delay, /// Number of heartbeats since the beginning of time; this allows us to amortize some resource /// clean up -- eg backoff clean up. @@ -318,7 +318,7 @@ pub struct Behaviour { outbound_peers: HashSet, /// Stores optional peer score data together with thresholds and decay interval. - peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker)>, + peer_score: Option<(PeerScore, PeerScoreThresholds, Delay)>, /// Counts the number of `IHAVE` received from each peer since the last heartbeat. count_received_ihave: HashMap, @@ -466,10 +466,7 @@ where config.backoff_slack(), ), mcache: MessageCache::new(config.history_gossip(), config.history_length()), - heartbeat: Ticker::new_with_next( - config.heartbeat_interval(), - config.heartbeat_initial_delay(), - ), + heartbeat: Delay::new(config.heartbeat_interval() + config.heartbeat_initial_delay()), heartbeat_ticks: 0, px_peers: HashSet::new(), outbound_peers: HashSet::new(), @@ -682,9 +679,15 @@ where // Gossipsub peers None => { tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); + // `fanout_peers` is always non-empty if it's `Some`. 
+ let fanout_peers = self + .fanout + .get(&topic_hash) + .map(|peers| if peers.is_empty() { None } else { Some(peers) }) + .unwrap_or(None); // If we have fanout peers add them to the map. - if self.fanout.contains_key(&topic_hash) { - for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { + if let Some(peers) = fanout_peers { + for peer in peers { recipient_peers.insert(*peer); } } else { @@ -938,7 +941,7 @@ where return Err("Peer score set twice".into()); } - let interval = Ticker::new(params.decay_interval); + let interval = Delay::new(params.decay_interval); let peer_score = PeerScore::new_with_message_delivery_time_callback(params, callback); self.peer_score = Some((peer_score, threshold, interval)); Ok(()) @@ -1208,7 +1211,7 @@ where } fn score_below_threshold_from_scores( - peer_score: &Option<(PeerScore, PeerScoreThresholds, Ticker)>, + peer_score: &Option<(PeerScore, PeerScoreThresholds, Delay)>, peer_id: &PeerId, threshold: impl Fn(&PeerScoreThresholds) -> f64, ) -> (bool, f64) { @@ -3427,14 +3430,16 @@ where } // update scores - if let Some((peer_score, _, interval)) = &mut self.peer_score { - while let Poll::Ready(Some(_)) = interval.poll_next_unpin(cx) { + if let Some((peer_score, _, delay)) = &mut self.peer_score { + if delay.poll_unpin(cx).is_ready() { peer_score.refresh_scores(); + delay.reset(peer_score.params.decay_interval); } } - while let Poll::Ready(Some(_)) = self.heartbeat.poll_next_unpin(cx) { + if self.heartbeat.poll_unpin(cx).is_ready() { self.heartbeat(); + self.heartbeat.reset(self.config.heartbeat_interval()); } Poll::Pending diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs index 713fe1f2668..90b8fe43fb5 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs @@ -25,6 +25,7 @@ use crate::subscription_filter::WhitelistSubscriptionFilter; use crate::types::RpcReceiver; use crate::{config::ConfigBuilder, types::Rpc, IdentTopic as Topic}; use byteorder::{BigEndian, ByteOrder}; +use futures::StreamExt; use libp2p::core::ConnectedPoint; use rand::Rng; use std::net::Ipv4Addr; diff --git a/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs b/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs index fa02f06f69d..ec6fe7bdb6e 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs @@ -44,7 +44,7 @@ mod tests; const TIME_CACHE_DURATION: u64 = 120; pub(crate) struct PeerScore { - params: PeerScoreParams, + pub(crate) params: PeerScoreParams, /// The score parameters. peer_stats: HashMap, /// Tracking peers per IP. 
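The behaviour hunks above drop the `futures-ticker` dependency and drive both the heartbeat and the peer-score decay from a one-shot `futures_timer::Delay` that is manually re-armed after each firing, instead of a `Ticker` stream; the `backoff.rs` hunk is the same cleanup in miniature, swapping a hand-rolled ceiling division for the standard `u128::div_ceil`. A minimal, self-contained sketch of the re-armed `Delay` pattern (illustrative names only, not the actual `Behaviour` code):

```rust
use std::task::{Context, Poll};
use std::time::Duration;

use futures::FutureExt; // for `poll_unpin`
use futures_timer::Delay;

/// Stand-in for a periodic event inside a poll-driven type such as a
/// libp2p `NetworkBehaviour`.
struct Heartbeat {
    delay: Delay,
    interval: Duration,
}

impl Heartbeat {
    fn new(interval: Duration, initial_delay: Duration) -> Self {
        // First tick fires after `interval + initial_delay`, mirroring the
        // `Delay::new(config.heartbeat_interval() + config.heartbeat_initial_delay())`
        // call in the diff above.
        Self {
            delay: Delay::new(interval + initial_delay),
            interval,
        }
    }

    /// Returns `Poll::Ready(())` at most once per interval.
    fn poll_tick(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        if self.delay.poll_unpin(cx).is_ready() {
            // A `Delay` completes exactly once; unlike the old `Ticker` stream
            // it must be reset by hand, which is why the behaviour now calls
            // `delay.reset(..)` after handling each tick.
            self.delay.reset(self.interval);
            return Poll::Ready(());
        }
        Poll::Pending
    }
}

fn main() {
    // Drive two ticks to completion on a minimal executor.
    futures::executor::block_on(async {
        let mut hb = Heartbeat::new(Duration::from_millis(10), Duration::from_millis(5));
        for _ in 0..2 {
            futures::future::poll_fn(|cx| hb.poll_tick(cx)).await;
        }
    });
}
```

Because the timer only re-arms once it is polled after firing, a slow poll loop skips ticks rather than queueing them up, which is usually the desired behaviour for heartbeats.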
diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index b91ad409164..578bb52b514 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -8,8 +8,8 @@ pub mod enr_ext; // Allow external use of the lighthouse ENR builder use crate::service::TARGET_SUBNET_PEERS; -use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use crate::{metrics, ClearDialError}; +use crate::{Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5}; pub use enr::{build_enr, load_enr_from_disk, use_or_load_enr, CombinedKey, Eth2Enr}; pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; @@ -205,7 +205,7 @@ impl<E: EthSpec> Discovery<E> { network_globals: Arc<NetworkGlobals<E>>, log: &slog::Logger, spec: &ChainSpec, - ) -> error::Result<Self> { + ) -> Result<Self, String> { let log = log.clone(); let enr_dir = match config.network_dir.to_str() { diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index ced803add80..2f8fd82c518 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -63,7 +63,7 @@ impl<'de> Deserialize<'de> for PeerIdSerialized { // A wrapper struct that prints a dial error nicely. struct ClearDialError<'a>(&'a DialError); -impl<'a> ClearDialError<'a> { +impl ClearDialError<'_> { fn most_inner_error(err: &(dyn std::error::Error)) -> &(dyn std::error::Error) { let mut current = err; while let Some(source) = current.source() { @@ -73,7 +73,7 @@ impl<'a> ClearDialError<'a> { } } -impl<'a> std::fmt::Display for ClearDialError<'a> { +impl std::fmt::Display for ClearDialError<'_> { fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { match &self.0 { DialError::Transport(errors) => { @@ -101,7 +101,7 @@ impl<'a> std::fmt::Display for ClearDialError<'a> { } pub use crate::types::{ - error, Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet, + Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet, SubnetDiscovery, }; diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index c1e72d250ff..4df2566dacb 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -4,7 +4,7 @@ use crate::discovery::enr_ext::EnrExt; use crate::discovery::peer_id_to_node_id; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RpcErrorResponse}; use crate::service::TARGET_SUBNET_PEERS; -use crate::{error, metrics, Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery}; +use crate::{metrics, Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery}; use delay_map::HashSetDelay; use discv5::Enr; use libp2p::identify::Info as IdentifyInfo; @@ -144,7 +144,7 @@ impl<E: EthSpec> PeerManager<E> { cfg: config::Config, network_globals: Arc<NetworkGlobals<E>>, log: &slog::Logger, - ) -> error::Result<Self> { + ) -> Result<Self, String> { let config::Config { discovery_enabled, metrics_enabled, diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 11676f9a01f..9fd059df857 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -141,10 +141,6 @@ impl<E: EthSpec> NetworkBehaviour for PeerManager<E> { debug!(self.log,
"Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %ClearDialError(error)); self.on_dial_failure(peer_id); } - FromSwarm::ExternalAddrConfirmed(_) => { - // We have an external address confirmed, means we are able to do NAT traversal. - metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p"], 1); - } _ => { // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release // notes more than compiler feedback diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 42ece6dc4ff..7b3a59eac7e 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -104,15 +104,14 @@ impl RateLimiterConfig { pub const DEFAULT_META_DATA_QUOTA: Quota = Quota::n_every(2, 5); pub const DEFAULT_STATUS_QUOTA: Quota = Quota::n_every(5, 15); pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10); - pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(1024, 10); + // The number is chosen to balance between upload bandwidth required to serve + // blocks and a decent syncing rate for honest nodes. Malicious nodes would need to + // spread out their requests over the time window to max out bandwidth on the server. + pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(128, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); - // `BlocksByRange` and `BlobsByRange` are sent together during range sync. - // It makes sense for blocks and blobs quotas to be equivalent in terms of the number of blocks: - // 1024 blocks * 6 max blobs per block. - // This doesn't necessarily mean that we are sending this many blobs, because the quotas are - // measured against the maximum request size. - pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(6144, 10); - pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(768, 10); + // `DEFAULT_BLOCKS_BY_RANGE_QUOTA` * (target + 1) to account for high usage + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(512, 10); + pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10); // 320 blocks worth of columns for regular node, or 40 blocks for supernode. // Range sync load balances when requesting blocks, and each batch is 32 blocks. 
pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10); diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index e76d6d27866..0a0a6ca754f 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -964,6 +964,9 @@ where request_info: (Id, RequestType), error: StreamUpgradeError, ) { + // This dialing is now considered failed + self.dial_negotiated -= 1; + let (id, req) = request_info; // map the error @@ -989,9 +992,6 @@ where StreamUpgradeError::Apply(other) => other, }; - // This dialing is now considered failed - self.dial_negotiated -= 1; - self.outbound_io_error_retries = 0; self.events_out .push(HandlerEvent::Err(HandlerErr::Outbound { diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index cb228153908..85fabbb0c3c 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -67,7 +67,6 @@ pub struct SamplingRequestId(pub usize); #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub struct CustodyId { pub requester: CustodyRequester, - pub req_id: Id, } /// Downstream components that perform custody by root requests. diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 0ad31ff2e80..e46c69dc716 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -250,18 +250,17 @@ impl futures::stream::Stream for GossipCache { Poll::Ready(Some(expired)) => { let expected_key = expired.key(); let (topic, data) = expired.into_inner(); - match self.topic_msgs.get_mut(&topic) { - Some(msgs) => { - let key = msgs.remove(&data); - debug_assert_eq!(key, Some(expected_key)); - if msgs.is_empty() { - // no more messages for this topic. - self.topic_msgs.remove(&topic); - } - } - None => { - #[cfg(debug_assertions)] - panic!("Topic for registered message is not present.") + let topic_msg = self.topic_msgs.get_mut(&topic); + debug_assert!( + topic_msg.is_some(), + "Topic for registered message is not present." + ); + if let Some(msgs) = topic_msg { + let key = msgs.remove(&data); + debug_assert_eq!(key, Some(expected_key)); + if msgs.is_empty() { + // no more messages for this topic. 
+ self.topic_msgs.remove(&topic); } } Poll::Ready(Some(Ok(topic))) diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index c6a764bb0ef..6fffd649f52 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ -1,5 +1,5 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic}; -use crate::{error, TopicHash}; +use crate::TopicHash; use gossipsub::{IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams}; use std::cmp::max; use std::collections::HashMap; @@ -84,7 +84,7 @@ impl<E: EthSpec> PeerScoreSettings<E> { thresholds: &PeerScoreThresholds, enr_fork_id: &EnrForkId, current_slot: Slot, - ) -> error::Result<PeerScoreParams> { + ) -> Result<PeerScoreParams, String> { let mut params = PeerScoreParams { decay_interval: self.decay_interval, decay_to_zero: self.decay_to_zero, @@ -175,7 +175,7 @@ impl<E: EthSpec> PeerScoreSettings<E> { &self, active_validators: usize, current_slot: Slot, - ) -> error::Result<(TopicScoreParams, TopicScoreParams, TopicScoreParams)> { + ) -> Result<(TopicScoreParams, TopicScoreParams, TopicScoreParams), String> { let (aggregators_per_slot, committees_per_slot) = self.expected_aggregator_count_per_slot(active_validators)?; let multiple_bursts_per_subnet_per_epoch = @@ -256,7 +256,7 @@ impl<E: EthSpec> PeerScoreSettings<E> { fn expected_aggregator_count_per_slot( &self, active_validators: usize, - ) -> error::Result<(f64, usize)> { + ) -> Result<(f64, usize), String> { let committees_per_slot = E::get_committee_count_per_slot_with( active_validators, self.max_committees_per_slot, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index b23e417adb0..afcbfce1732 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -20,7 +20,7 @@ use crate::types::{ }; use crate::EnrExt; use crate::Eth2Enr; -use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; +use crate::{metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use api_types::{AppRequestId, PeerRequestId, RequestId, Response}; use futures::stream::StreamExt; use gossipsub::{ @@ -38,6 +38,7 @@ use std::num::{NonZeroU8, NonZeroUsize}; use std::path::PathBuf; use std::pin::Pin; use std::sync::Arc; +use std::time::Duration; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; @@ -170,7 +171,7 @@ impl<AppReqId: ReqId, E: EthSpec> Network<AppReqId, E> { executor: task_executor::TaskExecutor, mut ctx: ServiceContext<'_>, log: &slog::Logger, - ) -> error::Result<(Self, Arc<NetworkGlobals<E>>)> { + ) -> Result<(Self, Arc<NetworkGlobals<E>>), String> { let log = log.new(o!("service"=> "libp2p")); let config = ctx.config.clone(); @@ -466,6 +467,8 @@ impl<AppReqId: ReqId, E: EthSpec> Network<AppReqId, E> { let config = libp2p::swarm::Config::with_executor(Executor(executor)) .with_notify_handler_buffer_size(NonZeroUsize::new(7).expect("Not zero")) .with_per_connection_event_buffer_size(4) + .with_idle_connection_timeout(Duration::from_secs(10)) // Other clients can timeout + // during negotiation .with_dial_concurrency_factor(NonZeroU8::new(1).unwrap()); let builder = SwarmBuilder::with_existing_identity(local_keypair) @@ -515,7 +518,7 @@ impl<AppReqId: ReqId, E: EthSpec> Network<AppReqId, E> { /// - Starts listening in the given ports. /// - Dials boot-nodes and libp2p peers. /// - Subscribes to starting gossipsub topics.
- async fn start(&mut self, config: &crate::NetworkConfig) -> error::Result<()> { + async fn start(&mut self, config: &crate::NetworkConfig) -> Result<(), String> { let enr = self.network_globals.local_enr(); info!(self.log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); debug!(self.log, "Attempting to open listening ports"; config.listen_addrs(), "discovery_enabled" => !config.disable_discovery, "quic_enabled" => !config.disable_quic_support); @@ -920,7 +923,7 @@ impl Network { &mut self, active_validators: usize, current_slot: Slot, - ) -> error::Result<()> { + ) -> Result<(), String> { let (beacon_block_params, beacon_aggregate_proof_params, beacon_attestation_subnet_params) = self.score_settings .get_dynamic_topic_params(active_validators, current_slot)?; diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index f4988e68cd5..490928c08c3 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -1,9 +1,7 @@ use crate::multiaddr::Protocol; use crate::rpc::methods::MetaDataV3; use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; -use crate::types::{ - error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind, -}; +use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind}; use crate::{GossipTopic, NetworkConfig}; use futures::future::Either; use gossipsub; @@ -83,7 +81,7 @@ pub fn build_transport( // Useful helper functions for debugging. Currently not used in the client. #[allow(dead_code)] -fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> { +fn keypair_from_hex(hex_bytes: &str) -> Result<Keypair, String> { let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") { stripped.to_string() } else { @@ -91,18 +89,18 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> { }; hex::decode(hex_bytes) - .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) + .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e)) .and_then(keypair_from_bytes) } #[allow(dead_code)] -fn keypair_from_bytes(mut bytes: Vec<u8>) -> error::Result<Keypair> { +fn keypair_from_bytes(mut bytes: Vec<u8>) -> Result<Keypair, String> { secp256k1::SecretKey::try_from_bytes(&mut bytes) .map(|secret| { let keypair: secp256k1::Keypair = secret.into(); keypair.into() }) - .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) + .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e)) } /// Loads a private key from disk. If this fails, a new key is diff --git a/beacon_node/lighthouse_network/src/types/error.rs b/beacon_node/lighthouse_network/src/types/error.rs deleted file mode 100644 index a291e8fec5d..00000000000 --- a/beacon_node/lighthouse_network/src/types/error.rs +++ /dev/null @@ -1,5 +0,0 @@ -// generates error types - -use error_chain::error_chain; - -error_chain!
{} diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index 82558f6c977..6f266fd2bad 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -1,4 +1,3 @@ -pub mod error; mod globals; mod pubsub; mod subnet; diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 500cd23faeb..44f6c54bbc4 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -5,52 +5,51 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dev-dependencies] -sloggers = { workspace = true } +bls = { workspace = true } +eth2 = { workspace = true } +eth2_network_config = { workspace = true } genesis = { workspace = true } +gossipsub = { workspace = true } +kzg = { workspace = true } matches = "0.1.8" serde_json = { workspace = true } -slog-term = { workspace = true } slog-async = { workspace = true } -eth2 = { workspace = true } -gossipsub = { workspace = true } -eth2_network_config = { workspace = true } -kzg = { workspace = true } -bls = { workspace = true } +slog-term = { workspace = true } +sloggers = { workspace = true } [dependencies] alloy-primitives = { workspace = true } -async-channel = { workspace = true } +alloy-rlp = { workspace = true } anyhow = { workspace = true } +async-channel = { workspace = true } beacon_chain = { workspace = true } -store = { workspace = true } -lighthouse_network = { workspace = true } -types = { workspace = true } -slot_clock = { workspace = true } -slog = { workspace = true } -hex = { workspace = true } +beacon_processor = { workspace = true } +delay_map = { workspace = true } +derivative = { workspace = true } ethereum_ssz = { workspace = true } -ssz_types = { workspace = true } -futures = { workspace = true } -error-chain = { workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } -smallvec = { workspace = true } -rand = { workspace = true } +execution_layer = { workspace = true } fnv = { workspace = true } -alloy-rlp = { workspace = true } -metrics = { workspace = true } -logging = { workspace = true } -task_executor = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } igd-next = "0.14" itertools = { workspace = true } +lighthouse_network = { workspace = true } +logging = { workspace = true } lru_cache = { workspace = true } -strum = { workspace = true } -derivative = { workspace = true } -delay_map = { workspace = true } +metrics = { workspace = true } operation_pool = { workspace = true } -execution_layer = { workspace = true } -beacon_processor = { workspace = true } parking_lot = { workspace = true } +rand = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +smallvec = { workspace = true } +ssz_types = { workspace = true } +store = { workspace = true } +strum = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +types = { workspace = true } [features] # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill diff --git a/beacon_node/network/src/error.rs b/beacon_node/network/src/error.rs deleted file mode 100644 index 1a964235e9b..00000000000 --- a/beacon_node/network/src/error.rs +++ /dev/null @@ -1,8 +0,0 @@ -// generates error types -use error_chain::error_chain; - -error_chain! 
{ - links { - Libp2p(lighthouse_network::error::Error, lighthouse_network::error::ErrorKind); - } -} diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 13a2569b750..2a7fedb53e9 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -1,5 +1,4 @@ /// This crate provides the network server for Lighthouse. -pub mod error; pub mod service; mod metrics; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 4b7e8a50a36..154a59eade7 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -2,7 +2,8 @@ use beacon_chain::{ attestation_verification::Error as AttnError, light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, - sync_committee_verification::Error as SyncCommitteeError, + sync_committee_verification::Error as SyncCommitteeError, AvailabilityProcessingStatus, + BlockError, }; use fnv::FnvHashMap; use lighthouse_network::{ @@ -11,12 +12,19 @@ use lighthouse_network::{ }; pub use metrics::*; use std::sync::{Arc, LazyLock}; +use strum::AsRefStr; use strum::IntoEnumIterator; use types::EthSpec; pub const SUCCESS: &str = "SUCCESS"; pub const FAILURE: &str = "FAILURE"; +#[derive(Debug, AsRefStr)] +pub(crate) enum BlockSource { + Gossip, + Rpc, +} + pub static BEACON_BLOCK_MESH_PEERS_PER_CLIENT: LazyLock<Result<IntGaugeVec>> = LazyLock::new(|| { try_create_int_gauge_vec( @@ -59,6 +67,27 @@ pub static SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: LazyLock> = ) }); +/* + * Beacon processor + */ +pub static BEACON_PROCESSOR_MISSING_COMPONENTS: LazyLock<Result<IntCounterVec>> = LazyLock::new( + || { + try_create_int_counter_vec( + "beacon_processor_missing_components_total", + "Total number of imported individual block components that resulted in missing components", + &["source", "component"], + ) + }, +); +pub static BEACON_PROCESSOR_IMPORT_ERRORS_PER_TYPE: LazyLock<Result<IntCounterVec>> = + LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_processor_import_errors_total", + "Total number of block components that were not verified", + &["source", "component", "type"], + ) + }); + /* * Gossip processor */ @@ -606,6 +635,37 @@ pub fn register_sync_committee_error(error: &SyncCommitteeError) { inc_counter_vec(&GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE, &[error.as_ref()]); } +pub(crate) fn register_process_result_metrics( + result: &std::result::Result<AvailabilityProcessingStatus, BlockError>, + source: BlockSource, + block_component: &'static str, +) { + match result { + Ok(status) => match status { + AvailabilityProcessingStatus::Imported { .. } => match source { + BlockSource::Gossip => { + inc_counter(&BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); + } + BlockSource::Rpc => { + inc_counter(&BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); + } + }, + AvailabilityProcessingStatus::MissingComponents { ..
} => { + inc_counter_vec( + &BEACON_PROCESSOR_MISSING_COMPONENTS, + &[source.as_ref(), block_component], + ); + } + }, + Err(error) => { + inc_counter_vec( + &BEACON_PROCESSOR_IMPORT_ERRORS_PER_TYPE, + &[source.as_ref(), block_component, error.as_ref()], + ); + } + } +} + pub fn from_result(result: &std::result::Result) -> &str { match result { Ok(_) => SUCCESS, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index e92f4504762..f3c48e42f0b 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1,5 +1,5 @@ use crate::{ - metrics, + metrics::{self, register_process_result_metrics}, network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}, service::NetworkMessage, sync::SyncMessage, @@ -710,8 +710,19 @@ impl NetworkBeaconProcessor { MessageAcceptance::Reject, ); } + GossipDataColumnError::PriorKnown { .. } => { + // Data column is available via either the EL or reconstruction. + // Do not penalise the peer. + // Gossip filter should filter any duplicates received after this. + debug!( + self.log, + "Received already available column sidecar. Ignoring the column sidecar"; + "slot" => %slot, + "block_root" => %block_root, + "index" => %index, + ) + } GossipDataColumnError::FutureSlot { .. } - | GossipDataColumnError::PriorKnown { .. } | GossipDataColumnError::PastFinalizedSlot { .. } => { debug!( self.log, @@ -852,7 +863,18 @@ impl NetworkBeaconProcessor { MessageAcceptance::Reject, ); } - GossipBlobError::FutureSlot { .. } | GossipBlobError::RepeatBlob { .. } => { + GossipBlobError::RepeatBlob { .. } => { + // We may have received the blob from the EL. Do not penalise the peer. + // Gossip filter should filter any duplicates received after this. + debug!( + self.log, + "Received already available blob sidecar. Ignoring the blob sidecar"; + "slot" => %slot, + "root" => %root, + "index" => %index, + ) + } + GossipBlobError::FutureSlot { .. } => { debug!( self.log, "Could not verify blob sidecar for gossip. 
Ignoring the blob sidecar"; @@ -915,12 +937,11 @@ impl NetworkBeaconProcessor { let blob_index = verified_blob.id().index; let result = self.chain.process_gossip_blob(verified_blob).await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "blob"); match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { - // Note: Reusing block imported metric here - metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); - debug!( + info!( self.log, "Gossipsub blob processed - imported fully available block"; "block_root" => %block_root @@ -989,43 +1010,39 @@ impl NetworkBeaconProcessor { let data_column_slot = verified_data_column.slot(); let data_column_index = verified_data_column.id().index; - match self + let result = self .chain .process_gossip_data_columns(vec![verified_data_column], || Ok(())) - .await - { - Ok(availability) => { - match availability { - AvailabilityProcessingStatus::Imported(block_root) => { - // Note: Reusing block imported metric here - metrics::inc_counter( - &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL, - ); - info!( - self.log, - "Gossipsub data column processed, imported fully available block"; - "block_root" => %block_root - ); - self.chain.recompute_head_at_current_slot().await; + .await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "data_column"); - metrics::set_gauge( - &metrics::BEACON_BLOB_DELAY_FULL_VERIFICATION, - processing_start_time.elapsed().as_millis() as i64, - ); - } - AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { - trace!( - self.log, - "Processed data column, waiting for other components"; - "slot" => %slot, - "data_column_index" => %data_column_index, - "block_root" => %block_root, - ); + match result { + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(block_root) => { + info!( + self.log, + "Gossipsub data column processed, imported fully available block"; + "block_root" => %block_root + ); + self.chain.recompute_head_at_current_slot().await; - self.attempt_data_column_reconstruction(block_root).await; - } + metrics::set_gauge( + &metrics::BEACON_BLOB_DELAY_FULL_VERIFICATION, + processing_start_time.elapsed().as_millis() as i64, + ); } - } + AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { + trace!( + self.log, + "Processed data column, waiting for other components"; + "slot" => %slot, + "data_column_index" => %data_column_index, + "block_root" => %block_root, + ); + + self.attempt_data_column_reconstruction(block_root).await; + } + }, Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, @@ -1444,6 +1461,20 @@ impl NetworkBeaconProcessor { } } + // Block is gossip valid. Attempt to fetch blobs from the EL using versioned hashes derived + // from kzg commitments, without having to wait for all blobs to be sent from the peers. 
+ let publish_blobs = true; + let self_clone = self.clone(); + let block_clone = block.clone(); + self.executor.spawn( + async move { + self_clone + .fetch_engine_blobs_and_publish(block_clone, block_root, publish_blobs) + .await + }, + "fetch_blobs_gossip", + ); + let result = self .chain .process_block_with_early_caching( @@ -1453,11 +1484,10 @@ impl NetworkBeaconProcessor { NotifyExecutionLayer::Yes, ) .await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "block"); match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { - metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); - if reprocess_tx .try_send(ReprocessQueueMessage::BlockImported { block_root: *block_root, @@ -1494,13 +1524,6 @@ impl NetworkBeaconProcessor { "slot" => slot, "block_root" => %block_root, ); - - // Block is valid, we can now attempt fetching blobs from EL using version hashes - // derived from kzg commitments from the block, without having to wait for all blobs - // to be sent from the peers if we already have them. - let publish_blobs = true; - self.fetch_engine_blobs_and_publish(block.clone(), *block_root, publish_blobs) - .await; } Err(BlockError::ParentUnknown { .. }) => { // This should not occur. It should be checked by `should_forward_block`. diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 6c6bb26ee09..817e6b64409 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -1,4 +1,4 @@ -use crate::metrics; +use crate::metrics::{self, register_process_result_metrics}; use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; use crate::sync::BatchProcessResult; use crate::sync::{ @@ -163,8 +163,7 @@ impl NetworkBeaconProcessor { NotifyExecutionLayer::Yes, ) .await; - - metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "block"); // RPC block imported, regardless of process type match result.as_ref() { @@ -286,6 +285,7 @@ impl NetworkBeaconProcessor { } let result = self.chain.process_rpc_blobs(slot, block_root, blobs).await; + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "blobs"); match &result { Ok(AvailabilityProcessingStatus::Imported(hash)) => { @@ -343,6 +343,7 @@ impl NetworkBeaconProcessor { .chain .process_rpc_custody_columns(custody_columns) .await; + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "custody_columns"); match &result { Ok(availability) => match availability { diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 9d774d97c15..7e27a91bd6b 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -527,7 +527,7 @@ impl TestRig { self.assert_event_journal( &expected .iter() - .map(|ev| Into::<&'static str>::into(ev)) + .map(Into::<&'static str>::into) .chain(std::iter::once(WORKER_FREED)) .chain(std::iter::once(NOTHING_TO_DO)) .collect::>(), diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index e1badfda9d5..0a99b6af0cf 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -5,7 +5,6 @@ //! syncing-related responses to the Sync manager. 
#![allow(clippy::unit_arg)] -use crate::error; use crate::network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::status_message; @@ -92,7 +91,7 @@ impl Router { beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, log: slog::Logger, - ) -> error::Result>> { + ) -> Result>, String> { let message_handler_log = log.new(o!("service"=> "router")); trace!(message_handler_log, "Service starting"); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 5a66cb7f30d..7826807e035 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,13 +1,10 @@ +use crate::metrics; use crate::nat; use crate::network_beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; -use crate::subnet_service::SyncCommitteeService; -use crate::{error, metrics}; -use crate::{ - subnet_service::{AttestationService, SubnetServiceMessage}, - NetworkConfig, -}; +use crate::subnet_service::{SubnetService, SubnetServiceMessage, Subscription}; +use crate::NetworkConfig; use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend}; use futures::channel::mpsc::Sender; @@ -165,10 +162,8 @@ pub struct NetworkService { beacon_chain: Arc>, /// The underlying libp2p service that drives all the network interactions. libp2p: Network, - /// An attestation and subnet manager service. - attestation_service: AttestationService, - /// A sync committeee subnet manager service. - sync_committee_service: SyncCommitteeService, + /// An attestation and sync committee subnet manager service. + subnet_service: SubnetService, /// The receiver channel for lighthouse to communicate with the network service. network_recv: mpsc::UnboundedReceiver>, /// The receiver channel for lighthouse to send validator subscription requests. 
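The `NetworkService` changes in this file collapse the separate `AttestationService` and `SyncCommitteeService` into a single `SubnetService` fed through one `Subscription` enum (see the hunks below, where callers wrap requests with `Subscription::Attestation` and `Subscription::SyncCommittee`). A small, self-contained sketch of that enum-dispatch shape; all types here are simplified stand-ins, not the real Lighthouse definitions:

```rust
/// Hypothetical, simplified subscription payloads.
struct AttestationSubscription {
    subnet_id: u64,
}
struct SyncCommitteeSubscription {
    committee_index: u64,
}

/// One wrapper type lets a single service accept both kinds of request.
enum Subscription {
    Attestation(AttestationSubscription),
    SyncCommittee(SyncCommitteeSubscription),
}

#[derive(Default)]
struct SubnetService {
    attestation_subnets: Vec<u64>,
    sync_subnets: Vec<u64>,
}

impl SubnetService {
    /// A single entry point replaces the two per-service
    /// `validator_subscriptions` methods.
    fn validator_subscriptions(&mut self, subs: impl IntoIterator<Item = Subscription>) {
        for sub in subs {
            match sub {
                Subscription::Attestation(s) => self.attestation_subnets.push(s.subnet_id),
                Subscription::SyncCommittee(s) => self.sync_subnets.push(s.committee_index),
            }
        }
    }
}

fn main() {
    let mut service = SubnetService::default();
    service.validator_subscriptions([
        Subscription::Attestation(AttestationSubscription { subnet_id: 3 }),
        Subscription::SyncCommittee(SyncCommitteeSubscription { committee_index: 1 }),
    ]);
    assert_eq!(service.attestation_subnets, vec![3]);
    assert_eq!(service.sync_subnets, vec![1]);
}
```

One `select!` arm and one message handler (`on_subnet_service_msg`) then replace the two near-duplicate event paths that existed before.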
@@ -213,11 +208,14 @@ impl<T: BeaconChainTypes> NetworkService<T> { libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend<T::EthSpec>, beacon_processor_reprocess_tx: mpsc::Sender<ReprocessQueueMessage>, - ) -> error::Result<( - NetworkService<T>, - Arc<NetworkGlobals<T::EthSpec>>, - NetworkSenders<T::EthSpec>, - )> { + ) -> Result< + ( + NetworkService<T>, + Arc<NetworkGlobals<T::EthSpec>>, + NetworkSenders<T::EthSpec>, + ), + String, + > { let network_log = executor.log().clone(); // build the channels for external comms let (network_senders, network_receivers) = NetworkSenders::new(); @@ -317,16 +315,13 @@ impl<T: BeaconChainTypes> NetworkService<T> { network_log.clone(), )?; - // attestation subnet service - let attestation_service = AttestationService::new( + // attestation and sync committee subnet service + let subnet_service = SubnetService::new( beacon_chain.clone(), network_globals.local_enr().node_id(), &config, &network_log, ); - // sync committee subnet service - let sync_committee_service = - SyncCommitteeService::new(beacon_chain.clone(), &config, &network_log); // create a timer for updating network metrics let metrics_update = tokio::time::interval(Duration::from_secs(METRIC_UPDATE_INTERVAL)); @@ -344,8 +339,7 @@ impl<T: BeaconChainTypes> NetworkService<T> { let network_service = NetworkService { beacon_chain, libp2p, - attestation_service, - sync_committee_service, + subnet_service, network_recv, validator_subscription_recv, router_send, @@ -376,7 +370,7 @@ impl<T: BeaconChainTypes> NetworkService<T> { libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend<T::EthSpec>, beacon_processor_reprocess_tx: mpsc::Sender<ReprocessQueueMessage>, - ) -> error::Result<(Arc<NetworkGlobals<T::EthSpec>>, NetworkSenders<T::EthSpec>)> { + ) -> Result<(Arc<NetworkGlobals<T::EthSpec>>, NetworkSenders<T::EthSpec>), String> { let (network_service, network_globals, network_senders) = Self::build( beacon_chain, config, @@ -460,11 +454,8 @@ impl<T: BeaconChainTypes> NetworkService<T> { // handle a message from a validator requesting a subscription to a subnet Some(msg) = self.validator_subscription_recv.recv() => self.on_validator_subscription_msg(msg).await, - // process any attestation service events - Some(msg) = self.attestation_service.next() => self.on_attestation_service_msg(msg), - - // process any sync committee service events - Some(msg) = self.sync_committee_service.next() => self.on_sync_committee_service_message(msg), + // process any subnet service events + Some(msg) = self.subnet_service.next() => self.on_subnet_service_msg(msg), event = self.libp2p.next_event() => self.on_libp2p_event(event, &mut shutdown_sender).await, @@ -552,13 +543,14 @@ impl<T: BeaconChainTypes> NetworkService<T> { match message { // attestation information gets processed in the attestation service PubsubMessage::Attestation(ref subnet_and_attestation) => { - let subnet = subnet_and_attestation.0; + let subnet_id = subnet_and_attestation.0; let attestation = &subnet_and_attestation.1; // checks if we have an aggregator for the slot. If so, we should process // the attestation, else we just propagate the Attestation.
- let should_process = self - .attestation_service - .should_process_attestation(subnet, attestation); + let should_process = self.subnet_service.should_process_attestation( + Subnet::Attestation(subnet_id), + attestation, + ); self.send_to_router(RouterMessage::PubsubMessage( id, source, @@ -832,20 +824,12 @@ impl NetworkService { async fn on_validator_subscription_msg(&mut self, msg: ValidatorSubscriptionMessage) { match msg { ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions } => { - if let Err(e) = self - .attestation_service - .validator_subscriptions(subscriptions.into_iter()) - { - warn!(self.log, "Attestation validator subscription failed"; "error" => e); - } + let subscriptions = subscriptions.into_iter().map(Subscription::Attestation); + self.subnet_service.validator_subscriptions(subscriptions) } ValidatorSubscriptionMessage::SyncCommitteeSubscribe { subscriptions } => { - if let Err(e) = self - .sync_committee_service - .validator_subscriptions(subscriptions) - { - warn!(self.log, "Sync committee calidator subscription failed"; "error" => e); - } + let subscriptions = subscriptions.into_iter().map(Subscription::SyncCommittee); + self.subnet_service.validator_subscriptions(subscriptions) } } } @@ -881,7 +865,7 @@ impl NetworkService { } } - fn on_attestation_service_msg(&mut self, msg: SubnetServiceMessage) { + fn on_subnet_service_msg(&mut self, msg: SubnetServiceMessage) { match msg { SubnetServiceMessage::Subscribe(subnet) => { for fork_digest in self.required_gossip_fork_digests() { @@ -900,36 +884,9 @@ impl NetworkService { SubnetServiceMessage::EnrAdd(subnet) => { self.libp2p.update_enr_subnet(subnet, true); } - SubnetServiceMessage::EnrRemove(subnet) => { - self.libp2p.update_enr_subnet(subnet, false); - } - SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - self.libp2p.discover_subnet_peers(subnets_to_discover); - } - } - } - - fn on_sync_committee_service_message(&mut self, msg: SubnetServiceMessage) { - match msg { - SubnetServiceMessage::Subscribe(subnet) => { - for fork_digest in self.required_gossip_fork_digests() { - let topic = - GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.subscribe(topic); - } - } - SubnetServiceMessage::Unsubscribe(subnet) => { - for fork_digest in self.required_gossip_fork_digests() { - let topic = - GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.unsubscribe(topic); - } - } - SubnetServiceMessage::EnrAdd(subnet) => { - self.libp2p.update_enr_subnet(subnet, true); - } - SubnetServiceMessage::EnrRemove(subnet) => { - self.libp2p.update_enr_subnet(subnet, false); + SubnetServiceMessage::EnrRemove(sync_subnet_id) => { + self.libp2p + .update_enr_subnet(Subnet::SyncCommittee(sync_subnet_id), false); } SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { self.libp2p.discover_subnet_peers(subnets_to_discover); diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index b55992c624e..32bbfcbcaa1 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -1,238 +1,229 @@ -#[cfg(not(debug_assertions))] -#[cfg(test)] -mod tests { - use crate::persisted_dht::load_dht; - use crate::{NetworkConfig, NetworkService}; - use beacon_chain::test_utils::BeaconChainHarness; - use beacon_chain::BeaconChainTypes; - use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; - use futures::StreamExt; - use lighthouse_network::types::{GossipEncoding, 
GossipKind}; - use lighthouse_network::{Enr, GossipTopic}; - use slog::{o, Drain, Level, Logger}; - use sloggers::{null::NullLoggerBuilder, Build}; - use std::str::FromStr; - use std::sync::Arc; - use tokio::runtime::Runtime; - use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; - - impl NetworkService { - fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> { - self.libp2p.get_topic_params(topic) - } +#![cfg(not(debug_assertions))] +#![cfg(test)] +use crate::persisted_dht::load_dht; +use crate::{NetworkConfig, NetworkService}; +use beacon_chain::test_utils::BeaconChainHarness; +use beacon_chain::BeaconChainTypes; +use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; +use futures::StreamExt; +use lighthouse_network::types::{GossipEncoding, GossipKind}; +use lighthouse_network::{Enr, GossipTopic}; +use slog::{o, Drain, Level, Logger}; +use sloggers::{null::NullLoggerBuilder, Build}; +use std::str::FromStr; +use std::sync::Arc; +use tokio::runtime::Runtime; +use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; + +impl NetworkService { + fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> { + self.libp2p.get_topic_params(topic) } +} - fn get_logger(actual_log: bool) -> Logger { - if actual_log { - let drain = { - let decorator = slog_term::TermDecorator::new().build(); - let decorator = - logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).chan_size(2048).build(); - drain.filter_level(Level::Debug) - }; - - Logger::root(drain.fuse(), o!()) - } else { - let builder = NullLoggerBuilder; - builder.build().expect("should build logger") - } - } - - #[test] - fn test_dht_persistence() { - let log = get_logger(false); - - let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) - .default_spec() - .deterministic_keypairs(8) - .fresh_ephemeral_store() - .build() - .chain; - - let store = beacon_chain.store.clone(); +fn get_logger(actual_log: bool) -> Logger { + if actual_log { + let drain = { + let decorator = slog_term::TermDecorator::new().build(); + let decorator = + logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).chan_size(2048).build(); + drain.filter_level(Level::Debug) + }; - let enr1 = Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap(); - let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap(); - let enrs = vec![enr1, enr2]; + Logger::root(drain.fuse(), o!()) + } else { + let builder = NullLoggerBuilder; + builder.build().expect("should build logger") + } +} - let runtime = Arc::new(Runtime::new().unwrap()); +#[test] +fn test_dht_persistence() { + let log = get_logger(false); + + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .default_spec() + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .build() + .chain; + + let store = beacon_chain.store.clone(); + + let enr1 = 
Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap(); + let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap(); + let enrs = vec![enr1, enr2]; + + let runtime = Arc::new(Runtime::new().unwrap()); + + let (signal, exit) = async_channel::bounded(1); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let executor = + task_executor::TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + + let mut config = NetworkConfig::default(); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213); + config.discv5_config.table_filter = |_| true; // Do not ignore local IPs + config.upnp_enabled = false; + config.boot_nodes_enr = enrs.clone(); + let config = Arc::new(config); + runtime.block_on(async move { + // Create a new network service which implicitly gets dropped at the + // end of the block. + + let BeaconProcessorChannels { + beacon_processor_tx, + beacon_processor_rx: _beacon_processor_rx, + work_reprocessing_tx, + work_reprocessing_rx: _work_reprocessing_rx, + } = <_>::default(); + + let _network_service = NetworkService::start( + beacon_chain.clone(), + config, + executor, + None, + beacon_processor_tx, + work_reprocessing_tx, + ) + .await + .unwrap(); + drop(signal); + }); + + let raw_runtime = Arc::try_unwrap(runtime).unwrap(); + raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(300)); + + // Load the persisted dht from the store + let persisted_enrs = load_dht(store); + assert!( + persisted_enrs.contains(&enrs[0]), + "should have persisted the first ENR to store" + ); + assert!( + persisted_enrs.contains(&enrs[1]), + "should have persisted the second ENR to store" + ); +} - let (signal, exit) = async_channel::bounded(1); +// Test removing topic weight on old topics when a fork happens. +#[test] +fn test_removing_topic_weight_on_old_topics() { + let runtime = Arc::new(Runtime::new().unwrap()); + + // Capella spec + let mut spec = MinimalEthSpec::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(1)); + + // Build beacon chain. + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .spec(spec.clone().into()) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build() + .chain; + let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); + assert_eq!(next_fork_name, ForkName::Capella); + + // Build network service. 
+ let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { + let (_, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new( Arc::downgrade(&runtime), exit, - log.clone(), + get_logger(false), shutdown_tx, ); let mut config = NetworkConfig::default(); - config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); config.discv5_config.table_filter = |_| true; // Do not ignore local IPs config.upnp_enabled = false; - config.boot_nodes_enr = enrs.clone(); let config = Arc::new(config); - runtime.block_on(async move { - // Create a new network service which implicitly gets dropped at the - // end of the block. - - let BeaconProcessorChannels { - beacon_processor_tx, - beacon_processor_rx: _beacon_processor_rx, - work_reprocessing_tx, - work_reprocessing_rx: _work_reprocessing_rx, - } = <_>::default(); - - let _network_service = NetworkService::start( - beacon_chain.clone(), - config, - executor, - None, - beacon_processor_tx, - work_reprocessing_tx, - ) - .await - .unwrap(); - drop(signal); - }); - - let raw_runtime = Arc::try_unwrap(runtime).unwrap(); - raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(300)); - - // Load the persisted dht from the store - let persisted_enrs = load_dht(store); - assert!( - persisted_enrs.contains(&enrs[0]), - "should have persisted the first ENR to store" - ); - assert!( - persisted_enrs.contains(&enrs[1]), - "should have persisted the second ENR to store" - ); - } - // Test removing topic weight on old topics when a fork happens. - #[test] - fn test_removing_topic_weight_on_old_topics() { - let runtime = Arc::new(Runtime::new().unwrap()); - - // Capella spec - let mut spec = MinimalEthSpec::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.bellatrix_fork_epoch = Some(Epoch::new(0)); - spec.capella_fork_epoch = Some(Epoch::new(1)); - - // Build beacon chain. - let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec.clone().into()) - .deterministic_keypairs(8) - .fresh_ephemeral_store() - .mock_execution_layer() - .build() - .chain; - let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); - assert_eq!(next_fork_name, ForkName::Capella); - - // Build network service. - let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { - let (_, exit) = async_channel::bounded(1); - let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = task_executor::TaskExecutor::new( - Arc::downgrade(&runtime), - exit, - get_logger(false), - shutdown_tx, - ); - - let mut config = NetworkConfig::default(); - config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); - config.discv5_config.table_filter = |_| true; // Do not ignore local IPs - config.upnp_enabled = false; - let config = Arc::new(config); - - let beacon_processor_channels = - BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); - NetworkService::build( - beacon_chain.clone(), - config, - executor.clone(), - None, - beacon_processor_channels.beacon_processor_tx, - beacon_processor_channels.work_reprocessing_tx, - ) - .await - .unwrap() - }); - - // Subscribe to the topics. 
- runtime.block_on(async { - while network_globals.gossipsub_subscriptions.read().len() < 2 { - if let Some(msg) = network_service.attestation_service.next().await { - network_service.on_attestation_service_msg(msg); - } + let beacon_processor_channels = + BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); + NetworkService::build( + beacon_chain.clone(), + config, + executor.clone(), + None, + beacon_processor_channels.beacon_processor_tx, + beacon_processor_channels.work_reprocessing_tx, + ) + .await + .unwrap() + }); + + // Subscribe to the topics. + runtime.block_on(async { + while network_globals.gossipsub_subscriptions.read().len() < 2 { + if let Some(msg) = network_service.subnet_service.next().await { + network_service.on_subnet_service_msg(msg); } - }); - - // Make sure the service is subscribed to the topics. - let (old_topic1, old_topic2) = { - let mut subnets = SubnetId::compute_subnets_for_epoch::( - network_globals.local_enr().node_id().raw(), - beacon_chain.epoch().unwrap(), - &spec, - ) - .unwrap() - .0 - .collect::>(); - assert_eq!(2, subnets.len()); - - let old_fork_digest = beacon_chain.enr_fork_id().fork_digest; - let old_topic1 = GossipTopic::new( - GossipKind::Attestation(subnets.pop().unwrap()), - GossipEncoding::SSZSnappy, - old_fork_digest, - ); - let old_topic2 = GossipTopic::new( - GossipKind::Attestation(subnets.pop().unwrap()), - GossipEncoding::SSZSnappy, - old_fork_digest, - ); - - (old_topic1, old_topic2) - }; - let subscriptions = network_globals.gossipsub_subscriptions.read().clone(); - assert_eq!(2, subscriptions.len()); - assert!(subscriptions.contains(&old_topic1)); - assert!(subscriptions.contains(&old_topic2)); - let old_topic_params1 = network_service - .get_topic_params(old_topic1.clone()) - .expect("topic score params"); - assert!(old_topic_params1.topic_weight > 0.0); - let old_topic_params2 = network_service - .get_topic_params(old_topic2.clone()) - .expect("topic score params"); - assert!(old_topic_params2.topic_weight > 0.0); - - // Advance slot to the next fork - for _ in 0..MinimalEthSpec::slots_per_epoch() { - beacon_chain.slot_clock.advance_slot(); } + }); + + // Make sure the service is subscribed to the topics. + let (old_topic1, old_topic2) = { + let mut subnets = SubnetId::compute_attestation_subnets( + network_globals.local_enr().node_id().raw(), + &spec, + ) + .collect::>(); + assert_eq!(2, subnets.len()); + + let old_fork_digest = beacon_chain.enr_fork_id().fork_digest; + let old_topic1 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); + let old_topic2 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); - // Run `NetworkService::update_next_fork()`. - runtime.block_on(async { - network_service.update_next_fork(); - }); - - // Check that topic_weight on the old topics has been zeroed. 
- let old_topic_params1 = network_service - .get_topic_params(old_topic1) - .expect("topic score params"); - assert_eq!(0.0, old_topic_params1.topic_weight); - - let old_topic_params2 = network_service - .get_topic_params(old_topic2) - .expect("topic score params"); - assert_eq!(0.0, old_topic_params2.topic_weight); + (old_topic1, old_topic2) + }; + let subscriptions = network_globals.gossipsub_subscriptions.read().clone(); + assert_eq!(2, subscriptions.len()); + assert!(subscriptions.contains(&old_topic1)); + assert!(subscriptions.contains(&old_topic2)); + let old_topic_params1 = network_service + .get_topic_params(old_topic1.clone()) + .expect("topic score params"); + assert!(old_topic_params1.topic_weight > 0.0); + let old_topic_params2 = network_service + .get_topic_params(old_topic2.clone()) + .expect("topic score params"); + assert!(old_topic_params2.topic_weight > 0.0); + + // Advance slot to the next fork + for _ in 0..MinimalEthSpec::slots_per_epoch() { + beacon_chain.slot_clock.advance_slot(); } + + // Run `NetworkService::update_next_fork()`. + runtime.block_on(async { + network_service.update_next_fork(); + }); + + // Check that topic_weight on the old topics has been zeroed. + let old_topic_params1 = network_service + .get_topic_params(old_topic1) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params1.topic_weight); + + let old_topic_params2 = network_service + .get_topic_params(old_topic2) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params2.topic_weight); } diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs deleted file mode 100644 index 432a2b7fb7c..00000000000 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ /dev/null @@ -1,687 +0,0 @@ -//! This service keeps track of which shard subnet the beacon node should be subscribed to at any -//! given time. It schedules subscriptions to shard subnets, requests peer discoveries and -//! determines whether attestations should be aggregated and/or passed to the beacon node. - -use super::SubnetServiceMessage; -use std::collections::HashSet; -use std::collections::{HashMap, VecDeque}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::Duration; - -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use delay_map::{HashMapDelay, HashSetDelay}; -use futures::prelude::*; -use lighthouse_network::{discv5::enr::NodeId, NetworkConfig, Subnet, SubnetDiscovery}; -use slog::{debug, error, info, o, trace, warn}; -use slot_clock::SlotClock; -use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription}; - -use crate::metrics; - -/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the -/// slot is less than this number, skip the peer discovery process. -/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. -pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; -/// The fraction of a slot that we subscribe to a subnet before the required slot. -/// -/// Currently a whole slot ahead. -const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1; - -/// The number of slots after an aggregator duty where we remove the entry from -/// `aggregate_validators_on_subnet` delay map. -const UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY: u32 = 2; - -#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] -pub(crate) enum SubscriptionKind { - /// Long lived subscriptions. 
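Zeroing `topic_weight`, as the assertions above verify, removes a topic from peer-score aggregation without tearing the mesh down abruptly. A toy model of why a zero weight neutralises a topic's contribution, assuming a gossipsub-style weighted sum (not libp2p's actual scoring code):

```rust
/// Toy model of gossipsub peer scoring: each topic contributes its own score
/// scaled by a per-topic weight, so a weight of 0.0 silences the topic.
struct TopicScore {
    topic_weight: f64,
    topic_score: f64,
}

fn peer_score(topics: &[TopicScore]) -> f64 {
    topics.iter().map(|t| t.topic_weight * t.topic_score).sum()
}

fn main() {
    let mut topics = vec![
        TopicScore { topic_weight: 0.5, topic_score: -10.0 }, // pre-fork topic
        TopicScore { topic_weight: 0.5, topic_score: 4.0 },   // post-fork topic
    ];
    assert_eq!(peer_score(&topics), -3.0);
    // After the fork, zero the old topic's weight: it no longer drags the score.
    topics[0].topic_weight = 0.0;
    assert_eq!(peer_score(&topics), 2.0);
}
```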
- /// - /// These have a longer duration and are advertised in our ENR. - LongLived, - /// Short lived subscriptions. - /// - /// Subscribing to these subnets has a short duration and we don't advertise it in our ENR. - ShortLived, -} - -/// A particular subnet at a given slot. -#[derive(PartialEq, Eq, Hash, Clone, Debug, Copy)] -pub struct ExactSubnet { - /// The `SubnetId` associated with this subnet. - pub subnet_id: SubnetId, - /// The `Slot` associated with this subnet. - pub slot: Slot, -} - -pub struct AttestationService { - /// Queued events to return to the driving service. - events: VecDeque, - - /// A reference to the beacon chain to process received attestations. - pub(crate) beacon_chain: Arc>, - - /// Subnets we are currently subscribed to as short lived subscriptions. - /// - /// Once they expire, we unsubscribe from these. - /// We subscribe to subnets when we are an aggregator for an exact subnet. - short_lived_subscriptions: HashMapDelay, - - /// Subnets we are currently subscribed to as long lived subscriptions. - /// - /// We advertise these in our ENR. When these expire, the subnet is removed from our ENR. - /// These are required of all beacon nodes. The exact number is determined by the chain - /// specification. - long_lived_subscriptions: HashSet, - - /// Short lived subscriptions that need to be executed in the future. - scheduled_short_lived_subscriptions: HashSetDelay, - - /// A collection timeouts to track the existence of aggregate validator subscriptions at an - /// `ExactSubnet`. - aggregate_validators_on_subnet: Option>, - - /// The waker for the current thread. - waker: Option, - - /// The discovery mechanism of lighthouse is disabled. - discovery_disabled: bool, - - /// We are always subscribed to all subnets. - subscribe_all_subnets: bool, - - /// Our Discv5 node_id. - node_id: NodeId, - - /// Future used to manage subscribing and unsubscribing from long lived subnets. - next_long_lived_subscription_event: Pin>, - - /// Whether this node is a block proposer-only node. - proposer_only: bool, - - /// The logger for the attestation service. - log: slog::Logger, -} - -impl AttestationService { - /* Public functions */ - - /// Establish the service based on the passed configuration. - pub fn new( - beacon_chain: Arc>, - node_id: NodeId, - config: &NetworkConfig, - log: &slog::Logger, - ) -> Self { - let log = log.new(o!("service" => "attestation_service")); - - let slot_duration = beacon_chain.slot_clock.slot_duration(); - - if config.subscribe_all_subnets { - slog::info!(log, "Subscribing to all subnets"); - } else { - slog::info!(log, "Deterministic long lived subnets enabled"; "subnets_per_node" => beacon_chain.spec.subnets_per_node, "subscription_duration_in_epochs" => beacon_chain.spec.epochs_per_subnet_subscription); - } - - let track_validators = !config.import_all_attestations; - let aggregate_validators_on_subnet = - track_validators.then(|| HashSetDelay::new(slot_duration)); - let mut service = AttestationService { - events: VecDeque::with_capacity(10), - beacon_chain, - short_lived_subscriptions: HashMapDelay::new(slot_duration), - long_lived_subscriptions: HashSet::default(), - scheduled_short_lived_subscriptions: HashSetDelay::default(), - aggregate_validators_on_subnet, - waker: None, - discovery_disabled: config.disable_discovery, - subscribe_all_subnets: config.subscribe_all_subnets, - node_id, - next_long_lived_subscription_event: { - // Set a dummy sleep. 
Calculating the current subnet subscriptions will update this - // value with a smarter timing - Box::pin(tokio::time::sleep(Duration::from_secs(1))) - }, - proposer_only: config.proposer_only, - log, - }; - - // If we are not subscribed to all subnets, handle the deterministic set of subnets - if !config.subscribe_all_subnets { - service.recompute_long_lived_subnets(); - } - - service - } - - /// Return count of all currently subscribed subnets (long-lived **and** short-lived). - #[cfg(test)] - pub fn subscription_count(&self) -> usize { - if self.subscribe_all_subnets { - self.beacon_chain.spec.attestation_subnet_count as usize - } else { - let count = self - .short_lived_subscriptions - .keys() - .chain(self.long_lived_subscriptions.iter()) - .collect::>() - .len(); - count - } - } - - /// Returns whether we are subscribed to a subnet for testing purposes. - #[cfg(test)] - pub(crate) fn is_subscribed( - &self, - subnet_id: &SubnetId, - subscription_kind: SubscriptionKind, - ) -> bool { - match subscription_kind { - SubscriptionKind::LongLived => self.long_lived_subscriptions.contains(subnet_id), - SubscriptionKind::ShortLived => self.short_lived_subscriptions.contains_key(subnet_id), - } - } - - #[cfg(test)] - pub(crate) fn long_lived_subscriptions(&self) -> &HashSet { - &self.long_lived_subscriptions - } - - /// Processes a list of validator subscriptions. - /// - /// This will: - /// - Register new validators as being known. - /// - Search for peers for required subnets. - /// - Request subscriptions for subnets on specific slots when required. - /// - Build the timeouts for each of these events. - /// - /// This returns a result simply for the ergonomics of using ?. The result can be - /// safely dropped. - pub fn validator_subscriptions( - &mut self, - subscriptions: impl Iterator, - ) -> Result<(), String> { - // If the node is in a proposer-only state, we ignore all subnet subscriptions. - if self.proposer_only { - return Ok(()); - } - - // Maps each subnet_id subscription to it's highest slot - let mut subnets_to_discover: HashMap = HashMap::new(); - - // Registers the validator with the attestation service. - for subscription in subscriptions { - metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_REQUESTS); - - trace!(self.log, - "Validator subscription"; - "subscription" => ?subscription, - ); - - // Compute the subnet that is associated with this subscription - let subnet_id = match SubnetId::compute_subnet::( - subscription.slot, - subscription.attestation_committee_index, - subscription.committee_count_at_slot, - &self.beacon_chain.spec, - ) { - Ok(subnet_id) => subnet_id, - Err(e) => { - warn!(self.log, - "Failed to compute subnet id for validator subscription"; - "error" => ?e, - ); - continue; - } - }; - // Ensure each subnet_id inserted into the map has the highest slot as it's value. - // Higher slot corresponds to higher min_ttl in the `SubnetDiscovery` entry. - if let Some(slot) = subnets_to_discover.get(&subnet_id) { - if subscription.slot > *slot { - subnets_to_discover.insert(subnet_id, subscription.slot); - } - } else if !self.discovery_disabled { - subnets_to_discover.insert(subnet_id, subscription.slot); - } - - let exact_subnet = ExactSubnet { - subnet_id, - slot: subscription.slot, - }; - - // Determine if the validator is an aggregator. If so, we subscribe to the subnet and - // if successful add the validator to a mapping of known aggregators for that exact - // subnet. 
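The `SubnetId::compute_subnet` call above follows the consensus-spec mapping from slot, committee index and committee count to an attestation subnet. The spec arithmetic in stand-alone form, with mainnet-like constants inlined (not Lighthouse's implementation):

```rust
const ATTESTATION_SUBNET_COUNT: u64 = 64;
const SLOTS_PER_EPOCH: u64 = 32;

/// Simplified `compute_subnet_for_attestation` from the consensus spec:
/// committees are laid out consecutively across the epoch and wrapped onto
/// the fixed set of attestation subnets.
fn compute_subnet(slot: u64, committee_index: u64, committees_per_slot: u64) -> u64 {
    let slots_since_epoch_start = slot % SLOTS_PER_EPOCH;
    let committees_since_epoch_start = committees_per_slot * slots_since_epoch_start;
    (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT
}

fn main() {
    // Slot 1 with 4 committees per slot: committee 2 lands on subnet 6.
    assert_eq!(compute_subnet(1, 2, 4), 6);
}
```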
- - if subscription.is_aggregator { - metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS); - if let Err(e) = self.subscribe_to_short_lived_subnet(exact_subnet) { - warn!(self.log, - "Subscription to subnet error"; - "error" => e, - ); - } else { - trace!(self.log, - "Subscribed to subnet for aggregator duties"; - "exact_subnet" => ?exact_subnet, - ); - } - } - } - - // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the - // required subnets. - if !self.discovery_disabled { - if let Err(e) = self.discover_peers_request( - subnets_to_discover - .into_iter() - .map(|(subnet_id, slot)| ExactSubnet { subnet_id, slot }), - ) { - warn!(self.log, "Discovery lookup request error"; "error" => e); - }; - } - - Ok(()) - } - - fn recompute_long_lived_subnets(&mut self) { - // Ensure the next computation is scheduled even if assigning subnets fails. - let next_subscription_event = self - .recompute_long_lived_subnets_inner() - .unwrap_or_else(|_| self.beacon_chain.slot_clock.slot_duration()); - - debug!(self.log, "Recomputing deterministic long lived subnets"); - self.next_long_lived_subscription_event = - Box::pin(tokio::time::sleep(next_subscription_event)); - - if let Some(waker) = self.waker.as_ref() { - waker.wake_by_ref(); - } - } - - /// Gets the long lived subnets the node should be subscribed to during the current epoch and - /// the remaining duration for which they remain valid. - fn recompute_long_lived_subnets_inner(&mut self) -> Result { - let current_epoch = self.beacon_chain.epoch().map_err(|e| { - if !self - .beacon_chain - .slot_clock - .is_prior_to_genesis() - .unwrap_or(false) - { - error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e) - } - })?; - - let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::( - self.node_id.raw(), - current_epoch, - &self.beacon_chain.spec, - ) - .map_err(|e| error!(self.log, "Could not compute subnets for current epoch"; "err" => e))?; - - let next_subscription_slot = - next_subscription_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let next_subscription_event = self - .beacon_chain - .slot_clock - .duration_to_slot(next_subscription_slot) - .ok_or_else(|| { - error!( - self.log, - "Failed to compute duration to next to long lived subscription event" - ) - })?; - - self.update_long_lived_subnets(subnets.collect()); - - Ok(next_subscription_event) - } - - /// Updates the long lived subnets. - /// - /// New subnets are registered as subscribed, removed subnets as unsubscribed and the Enr - /// updated accordingly. - fn update_long_lived_subnets(&mut self, mut subnets: HashSet) { - info!(self.log, "Subscribing to long-lived subnets"; "subnets" => ?subnets.iter().collect::>()); - for subnet in &subnets { - // Add the events for those subnets that are new as long lived subscriptions. - if !self.long_lived_subscriptions.contains(subnet) { - // Check if this subnet is new and send the subscription event if needed. 
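The update logic that continues below swaps the old and new long-lived subnet sets and then scans the leftovers to find removals. The same diff written with explicit `HashSet` differences, for clarity (subnet ids reduced to `u64` for the sketch):

```rust
use std::collections::HashSet;

// Returns (newly added, removed) when moving from `old` to `new`,
// mirroring what the swap-then-scan pattern computes implicitly.
fn diff_subnets(old: &HashSet<u64>, new: &HashSet<u64>) -> (HashSet<u64>, HashSet<u64>) {
    let added = new.difference(old).copied().collect();
    let removed = old.difference(new).copied().collect();
    (added, removed)
}

fn main() {
    let old: HashSet<u64> = [1, 2].into_iter().collect();
    let new: HashSet<u64> = [2, 3].into_iter().collect();
    let (added, removed) = diff_subnets(&old, &new);
    assert!(added.contains(&3) && added.len() == 1);
    assert!(removed.contains(&1) && removed.len() == 1);
}
```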
- if !self.short_lived_subscriptions.contains_key(subnet) { - debug!(self.log, "Subscribing to subnet"; - "subnet" => ?subnet, - "subscription_kind" => ?SubscriptionKind::LongLived, - ); - self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( - *subnet, - ))); - } - self.queue_event(SubnetServiceMessage::EnrAdd(Subnet::Attestation(*subnet))); - if !self.discovery_disabled { - self.queue_event(SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery { - subnet: Subnet::Attestation(*subnet), - min_ttl: None, - }])) - } - } - } - - // Update the long_lived_subnets set and check for subnets that are being removed - std::mem::swap(&mut self.long_lived_subscriptions, &mut subnets); - for subnet in subnets { - if !self.long_lived_subscriptions.contains(&subnet) { - self.handle_removed_subnet(subnet, SubscriptionKind::LongLived); - } - } - } - - /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip - /// verification, re-propagates and returns false. - pub fn should_process_attestation( - &self, - subnet: SubnetId, - attestation: &Attestation, - ) -> bool { - // Proposer-only mode does not need to process attestations - if self.proposer_only { - return false; - } - self.aggregate_validators_on_subnet - .as_ref() - .map(|tracked_vals| { - tracked_vals.contains_key(&ExactSubnet { - subnet_id: subnet, - slot: attestation.data().slot, - }) - }) - .unwrap_or(true) - } - - /* Internal private functions */ - - /// Adds an event to the event queue and notifies that this service is ready to be polled - /// again. - fn queue_event(&mut self, ev: SubnetServiceMessage) { - self.events.push_back(ev); - if let Some(waker) = &self.waker { - waker.wake_by_ref() - } - } - /// Checks if there are currently queued discovery requests and the time required to make the - /// request. - /// - /// If there is sufficient time, queues a peer discovery request for all the required subnets. - fn discover_peers_request( - &mut self, - exact_subnets: impl Iterator, - ) -> Result<(), &'static str> { - let current_slot = self - .beacon_chain - .slot_clock - .now() - .ok_or("Could not get the current slot")?; - - let discovery_subnets: Vec = exact_subnets - .filter_map(|exact_subnet| { - // Check if there is enough time to perform a discovery lookup. - if exact_subnet.slot - >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) - { - // Send out an event to start looking for peers. - // Require the peer for an additional slot to ensure we keep the peer for the - // duration of the subscription. - let min_ttl = self - .beacon_chain - .slot_clock - .duration_to_slot(exact_subnet.slot + 1) - .map(|duration| std::time::Instant::now() + duration); - Some(SubnetDiscovery { - subnet: Subnet::Attestation(exact_subnet.subnet_id), - min_ttl, - }) - } else { - // We may want to check the global PeerInfo to see estimated timeouts for each - // peer before they can be removed. - warn!(self.log, - "Not enough time for a discovery search"; - "subnet_id" => ?exact_subnet - ); - None - } - }) - .collect(); - - if !discovery_subnets.is_empty() { - self.queue_event(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); - } - Ok(()) - } - - // Subscribes to the subnet if it should be done immediately, or schedules it if required. 
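The `min_ttl` above ("keep this peer until one slot after the duty") reduces to instant-of-slot arithmetic. A sketch against a fixed genesis instant; the helper names are hypothetical and the real slot clock is more careful about genesis offsets:

```rust
use std::time::{Duration, Instant};

// Hypothetical slot clock: wall-clock duration until `slot` starts, or None
// if that slot already started. (Assumes `slot` fits comfortably in a u32.)
fn duration_to_slot(genesis: Instant, slot_duration: Duration, slot: u64) -> Option<Duration> {
    let slot_start = genesis + slot_duration * (slot as u32);
    slot_start.checked_duration_since(Instant::now())
}

// Keep a discovered peer until one slot *after* the duty slot, as above,
// so the peer is not pruned while the subscription is still live.
fn min_ttl(genesis: Instant, slot_duration: Duration, duty_slot: u64) -> Option<Instant> {
    duration_to_slot(genesis, slot_duration, duty_slot + 1).map(|d| Instant::now() + d)
}

fn main() {
    let genesis = Instant::now();
    let slot_duration = Duration::from_secs(12);
    // Slot 3 starts 36s after genesis; the TTL for a slot-2 duty ends there.
    assert!(min_ttl(genesis, slot_duration, 2).is_some());
}
```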
- fn subscribe_to_short_lived_subnet( - &mut self, - ExactSubnet { subnet_id, slot }: ExactSubnet, - ) -> Result<(), &'static str> { - let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - - // The short time we schedule the subscription before it's actually required. This - // ensures we are subscribed on time, and allows consecutive subscriptions to the same - // subnet to overlap, reducing subnet churn. - let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; - // The time to the required slot. - let time_to_subscription_slot = self - .beacon_chain - .slot_clock - .duration_to_slot(slot) - .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. - - // Calculate how long before we need to subscribe to the subnet. - let time_to_subscription_start = - time_to_subscription_slot.saturating_sub(advance_subscription_duration); - - // The time after a duty slot where we no longer need it in the `aggregate_validators_on_subnet` - // delay map. - let time_to_unsubscribe = - time_to_subscription_slot + UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY * slot_duration; - if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { - tracked_vals.insert_at(ExactSubnet { subnet_id, slot }, time_to_unsubscribe); - } - - // If the subscription should be done in the future, schedule it. Otherwise subscribe - // immediately. - if time_to_subscription_start.is_zero() { - // This is a current or past slot, we subscribe immediately. - self.subscribe_to_short_lived_subnet_immediately(subnet_id, slot + 1)?; - } else { - // This is a future slot, schedule subscribing. - trace!(self.log, "Scheduling subnet subscription"; "subnet" => ?subnet_id, "time_to_subscription_start" => ?time_to_subscription_start); - self.scheduled_short_lived_subscriptions - .insert_at(ExactSubnet { subnet_id, slot }, time_to_subscription_start); - } - - Ok(()) - } - - /* A collection of functions that handle the various timeouts */ - - /// Registers a subnet as subscribed. - /// - /// Checks that the time in which the subscription would end is not in the past. If we are - /// already subscribed, extends the timeout if necessary. If this is a new subscription, we send - /// out the appropriate events. - /// - /// On determinist long lived subnets, this is only used for short lived subscriptions. - fn subscribe_to_short_lived_subnet_immediately( - &mut self, - subnet_id: SubnetId, - end_slot: Slot, - ) -> Result<(), &'static str> { - if self.subscribe_all_subnets { - // Case not handled by this service. - return Ok(()); - } - - let time_to_subscription_end = self - .beacon_chain - .slot_clock - .duration_to_slot(end_slot) - .unwrap_or_default(); - - // First check this is worth doing. - if time_to_subscription_end.is_zero() { - return Err("Time when subscription would end has already passed."); - } - - let subscription_kind = SubscriptionKind::ShortLived; - - // We need to check and add a subscription for the right kind, regardless of the presence - // of the subnet as a subscription of the other kind. This is mainly since long lived - // subscriptions can be removed at any time when a validator goes offline. - - let (subscriptions, already_subscribed_as_other_kind) = ( - &mut self.short_lived_subscriptions, - self.long_lived_subscriptions.contains(&subnet_id), - ); - - match subscriptions.get(&subnet_id) { - Some(current_end_slot) => { - // We are already subscribed. Check if we need to extend the subscription. 
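The timing derivation above compresses into two `Duration` operations: subscribe one slot-fraction early, and treat an already-elapsed start time as "subscribe now". A compact sketch of just that arithmetic:

```rust
use std::time::Duration;

/// Timing sketch for the advance-subscription logic above: subscribe one
/// slot-fraction early, saturating at zero for current or past slots.
fn time_to_subscription_start(
    time_to_slot: Duration,
    slot_duration: Duration,
    advance_fraction: u32,
) -> Duration {
    time_to_slot.saturating_sub(slot_duration / advance_fraction)
}

fn main() {
    let slot = Duration::from_secs(12);
    // Duty slot is 30s away: start subscribing 12s early, i.e. in 18s.
    assert_eq!(
        time_to_subscription_start(Duration::from_secs(30), slot, 1),
        Duration::from_secs(18)
    );
    // Duty slot is now (or past): saturates to zero, subscribe immediately.
    assert!(time_to_subscription_start(Duration::ZERO, slot, 1).is_zero());
}
```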
- if &end_slot > current_end_slot { - trace!(self.log, "Extending subscription to subnet"; - "subnet" => ?subnet_id, - "prev_end_slot" => current_end_slot, - "new_end_slot" => end_slot, - "subscription_kind" => ?subscription_kind, - ); - subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); - } - } - None => { - // This is a new subscription. Add with the corresponding timeout and send the - // notification. - subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); - - // Inform of the subscription. - if !already_subscribed_as_other_kind { - debug!(self.log, "Subscribing to subnet"; - "subnet" => ?subnet_id, - "end_slot" => end_slot, - "subscription_kind" => ?subscription_kind, - ); - self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( - subnet_id, - ))); - } - } - } - - Ok(()) - } - - // Unsubscribes from a subnet that was removed if it does not continue to exist as a - // subscription of the other kind. For long lived subscriptions, it also removes the - // advertisement from our ENR. - fn handle_removed_subnet(&mut self, subnet_id: SubnetId, subscription_kind: SubscriptionKind) { - let exists_in_other_subscriptions = match subscription_kind { - SubscriptionKind::LongLived => self.short_lived_subscriptions.contains_key(&subnet_id), - SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains(&subnet_id), - }; - - if !exists_in_other_subscriptions { - // Subscription no longer exists as short lived or long lived. - debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet_id, "subscription_kind" => ?subscription_kind); - self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( - subnet_id, - ))); - } - - if subscription_kind == SubscriptionKind::LongLived { - // Remove from our ENR even if we remain subscribed in other way. - self.queue_event(SubnetServiceMessage::EnrRemove(Subnet::Attestation( - subnet_id, - ))); - } - } -} - -impl Stream for AttestationService { - type Item = SubnetServiceMessage; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // Update the waker if needed. - if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); - } - } else { - self.waker = Some(cx.waker().clone()); - } - - // Send out any generated events. - if let Some(event) = self.events.pop_front() { - return Poll::Ready(Some(event)); - } - - // If we aren't subscribed to all subnets, handle the deterministic long-lived subnets - if !self.subscribe_all_subnets { - match self.next_long_lived_subscription_event.as_mut().poll(cx) { - Poll::Ready(_) => { - self.recompute_long_lived_subnets(); - // We re-wake the task as there could be other subscriptions to process - self.waker - .as_ref() - .expect("Waker has been set") - .wake_by_ref(); - } - Poll::Pending => {} - } - } - - // Process scheduled subscriptions that might be ready, since those can extend a soon to - // expire subscription. 
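The waker handling above is the standard shape of a hand-rolled `Stream` backed by an event queue: stash the task's waker and have producers wake it when they enqueue. A minimal self-contained version of the pattern (not the service itself):

```rust
use futures::Stream;
use std::collections::VecDeque;
use std::pin::Pin;
use std::task::{Context, Poll, Waker};

/// Minimal event-queue stream: yields queued events, otherwise parks the
/// task and relies on `push` to wake it again.
struct EventQueue<E> {
    events: VecDeque<E>,
    waker: Option<Waker>,
}

impl<E> EventQueue<E> {
    fn push(&mut self, event: E) {
        self.events.push_back(event);
        if let Some(waker) = &self.waker {
            waker.wake_by_ref();
        }
    }
}

impl<E: Unpin> Stream for EventQueue<E> {
    type Item = E;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Refresh the stored waker in case the task has changed.
        self.waker = Some(cx.waker().clone());
        match self.events.pop_front() {
            Some(event) => Poll::Ready(Some(event)),
            None => Poll::Pending,
        }
    }
}
```

Unconditionally cloning the waker, as here, is the simplest correct variant; the service instead checks `will_wake` before replacing its stored waker.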
- match self.scheduled_short_lived_subscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(ExactSubnet { subnet_id, slot }))) => { - if let Err(e) = - self.subscribe_to_short_lived_subnet_immediately(subnet_id, slot + 1) - { - debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet_id, "err" => e); - } - self.waker - .as_ref() - .expect("Waker has been set") - .wake_by_ref(); - } - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for scheduled subnet subscriptions"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} - } - - // Finally process any expired subscriptions. - match self.short_lived_subscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => { - self.handle_removed_subnet(subnet_id, SubscriptionKind::ShortLived); - // We re-wake the task as there could be other subscriptions to process - self.waker - .as_ref() - .expect("Waker has been set") - .wake_by_ref(); - } - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} - } - - // Poll to remove entries on expiration, no need to act on expiration events. - if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { - if let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) { - error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> e); - } - } - - Poll::Pending - } -} diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs index 6450fc72eee..da1f220f042 100644 --- a/beacon_node/network/src/subnet_service/mod.rs +++ b/beacon_node/network/src/subnet_service/mod.rs @@ -1,10 +1,25 @@ -pub mod attestation_subnets; -pub mod sync_subnets; +//! This service keeps track of which shard subnet the beacon node should be subscribed to at any +//! given time. It schedules subscriptions to shard subnets, requests peer discoveries and +//! determines whether attestations should be aggregated and/or passed to the beacon node. -use lighthouse_network::{Subnet, SubnetDiscovery}; +use std::collections::HashSet; +use std::collections::{HashMap, VecDeque}; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; +use tokio::time::Instant; -pub use attestation_subnets::AttestationService; -pub use sync_subnets::SyncCommitteeService; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use delay_map::HashSetDelay; +use futures::prelude::*; +use lighthouse_network::{discv5::enr::NodeId, NetworkConfig, Subnet, SubnetDiscovery}; +use slog::{debug, error, o, warn}; +use slot_clock::SlotClock; +use types::{ + Attestation, EthSpec, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, + ValidatorSubscription, +}; #[cfg(test)] mod tests; @@ -17,12 +32,646 @@ pub enum SubnetServiceMessage { Unsubscribe(Subnet), /// Add the `SubnetId` to the ENR bitfield. EnrAdd(Subnet), - /// Remove the `SubnetId` from the ENR bitfield. - EnrRemove(Subnet), + /// Remove a sync committee subnet from the ENR. + EnrRemove(SyncSubnetId), /// Discover peers for a list of `SubnetDiscovery`. DiscoverPeers(Vec), } +use crate::metrics; + +/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the +/// slot is less than this number, skip the peer discovery process. +/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. 
+pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2;
+/// The fraction of a slot that we subscribe to a subnet before the required slot.
+///
+/// Currently a whole slot ahead.
+const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1;
+
+/// The number of slots after an aggregator duty where we remove the entry from
+/// `aggregate_validators_on_subnet` delay map.
+const UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY: u32 = 2;
+
+/// A particular subnet at a given slot. This is used for attestation subnets and not for sync
+/// committee subnets, because the logic for handling subscriptions differs between the two types.
+#[derive(PartialEq, Eq, Hash, Clone, Debug, Copy)]
+pub struct ExactSubnet {
+    /// The `SubnetId` associated with this subnet.
+    pub subnet: Subnet,
+    /// For attestations, this is the slot at which we need to be subscribed to the subnet.
+    pub slot: Slot,
+}
+
+/// The enum used to group all kinds of validator subscriptions.
+#[derive(Debug, Clone, PartialEq)]
+pub enum Subscription {
+    Attestation(ValidatorSubscription),
+    SyncCommittee(SyncCommitteeSubscription),
+}
+
+pub struct SubnetService<T: BeaconChainTypes> {
+    /// Queued events to return to the driving service.
+    events: VecDeque<SubnetServiceMessage>,
+
+    /// A reference to the beacon chain to process received attestations.
+    pub(crate) beacon_chain: Arc<BeaconChain<T>>,
+
+    /// Subnets we are currently subscribed to as short lived subscriptions.
+    ///
+    /// Once they expire, we unsubscribe from these.
+    /// We subscribe to subnets when we are an aggregator for an exact subnet.
+    // NOTE: On setup, the default timeout is set for sync committee subscriptions.
+    subscriptions: HashSetDelay<Subnet>,
+
+    /// Subscriptions that need to be executed in the future.
+    scheduled_subscriptions: HashSetDelay<ExactSubnet>,
+
+    /// A list of permanent subnets that this node is subscribed to.
+    // TODO: Shift this to a dynamic bitfield
+    permanent_attestation_subscriptions: HashSet<Subnet>,
+
+    /// A collection of timeouts to track the existence of aggregate validator subscriptions at an
+    /// `ExactSubnet`.
+    aggregate_validators_on_subnet: Option<HashSetDelay<ExactSubnet>>,
+
+    /// The waker for the current thread.
+    waker: Option<std::task::Waker>,
+
+    /// The discovery mechanism of lighthouse is disabled.
+    discovery_disabled: bool,
+
+    /// We are always subscribed to all subnets.
+    subscribe_all_subnets: bool,
+
+    /// Whether this node is a block proposer-only node.
+    proposer_only: bool,
+
+    /// The logger for the subnet service.
+    log: slog::Logger,
+}
+
+impl<T: BeaconChainTypes> SubnetService<T> {
+    /* Public functions */
+
+    /// Establish the service based on the passed configuration.
+    pub fn new(
+        beacon_chain: Arc<BeaconChain<T>>,
+        node_id: NodeId,
+        config: &NetworkConfig,
+        log: &slog::Logger,
+    ) -> Self {
+        let log = log.new(o!("service" => "subnet_service"));
+
+        let slot_duration = beacon_chain.slot_clock.slot_duration();
+
+        if config.subscribe_all_subnets {
+            slog::info!(log, "Subscribing to all subnets");
+        }
+
+        // Build the list of known permanent subscriptions, so that we know not to subscribe or
+        // discover them.
+        let mut permanent_attestation_subscriptions = HashSet::default();
+        if config.subscribe_all_subnets {
+            // We are subscribed to all subnets, set all the bits to true.
+            for index in 0..beacon_chain.spec.attestation_subnet_count {
+                permanent_attestation_subscriptions
+                    .insert(Subnet::Attestation(SubnetId::from(index)));
+            }
+        } else {
+            // Not subscribed to all subnets, so just calculate the required subnets from the node
+            // id.
+            for subnet_id in
+                SubnetId::compute_attestation_subnets(node_id.raw(), &beacon_chain.spec)
+            {
+                permanent_attestation_subscriptions.insert(Subnet::Attestation(subnet_id));
+            }
+        }
+
+        // Set up the sync committee subscriptions
+        let spec = &beacon_chain.spec;
+        let epoch_duration_secs =
+            beacon_chain.slot_clock.slot_duration().as_secs() * T::EthSpec::slots_per_epoch();
+        let default_sync_committee_duration = Duration::from_secs(
+            epoch_duration_secs.saturating_mul(spec.epochs_per_sync_committee_period.as_u64()),
+        );
+
+        let track_validators = !config.import_all_attestations;
+        let aggregate_validators_on_subnet =
+            track_validators.then(|| HashSetDelay::new(slot_duration));
+
+        let mut events = VecDeque::with_capacity(10);
+
+        // Queue discovery queries for the permanent attestation subnets
+        if !config.disable_discovery {
+            events.push_back(SubnetServiceMessage::DiscoverPeers(
+                permanent_attestation_subscriptions
+                    .iter()
+                    .cloned()
+                    .map(|subnet| SubnetDiscovery {
+                        subnet,
+                        min_ttl: None,
+                    })
+                    .collect(),
+            ));
+        }
+
+        // Pre-populate the events with permanent subscriptions
+        for subnet in permanent_attestation_subscriptions.iter() {
+            events.push_back(SubnetServiceMessage::Subscribe(*subnet));
+            events.push_back(SubnetServiceMessage::EnrAdd(*subnet));
+        }
+
+        SubnetService {
+            events,
+            beacon_chain,
+            subscriptions: HashSetDelay::new(default_sync_committee_duration),
+            permanent_attestation_subscriptions,
+            scheduled_subscriptions: HashSetDelay::default(),
+            aggregate_validators_on_subnet,
+            waker: None,
+            discovery_disabled: config.disable_discovery,
+            subscribe_all_subnets: config.subscribe_all_subnets,
+            proposer_only: config.proposer_only,
+            log,
+        }
+    }
+
+    /// Return an iterator over all currently subscribed short-lived subnets.
+    #[cfg(test)]
+    pub fn subscriptions(&self) -> impl Iterator<Item = &Subnet> {
+        self.subscriptions.iter()
+    }
+
+    #[cfg(test)]
+    pub fn permanent_subscriptions(&self) -> impl Iterator<Item = &Subnet> {
+        self.permanent_attestation_subscriptions.iter()
+    }
+
+    /// Returns whether we are subscribed to a subnet for testing purposes.
+    #[cfg(test)]
+    pub(crate) fn is_subscribed(&self, subnet: &Subnet) -> bool {
+        self.subscriptions.contains_key(subnet)
+            || self.permanent_attestation_subscriptions.contains(subnet)
+    }
+
+    /// Processes a list of validator subscriptions.
+    ///
+    /// This is fundamentally called from the HTTP API when a validator requests duties from us.
+    /// This will:
+    /// - Register new validators as being known.
+    /// - Search for peers for required subnets.
+    /// - Request subscriptions for subnets on specific slots when required.
+    /// - Build the timeouts for each of these events.
+    pub fn validator_subscriptions(&mut self, subscriptions: impl Iterator<Item = Subscription>) {
+        // If the node is in a proposer-only state, we ignore all subnet subscriptions.
+        if self.proposer_only {
+            return;
+        }
+
+        // Maps each subnet subscription to its highest slot.
+        let mut subnets_to_discover: HashMap<Subnet, Slot> = HashMap::new();
+
+        // Registers the validator with the subnet service.
+        for general_subscription in subscriptions {
+            match general_subscription {
+                Subscription::Attestation(subscription) => {
+                    metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_REQUESTS);
+
+                    // Compute the subnet that is associated with this subscription
+                    let subnet = match SubnetId::compute_subnet::<T::EthSpec>(
+                        subscription.slot,
+                        subscription.attestation_committee_index,
+                        subscription.committee_count_at_slot,
+                        &self.beacon_chain.spec,
+                    ) {
+                        Ok(subnet_id) => Subnet::Attestation(subnet_id),
+                        Err(e) => {
+                            warn!(self.log,
+                                "Failed to compute subnet id for validator subscription";
+                                "error" => ?e,
+                            );
+                            continue;
+                        }
+                    };
+
+                    // Ensure each subnet_id inserted into the map has the highest slot as its
+                    // value. A higher slot corresponds to a higher min_ttl in the
+                    // `SubnetDiscovery` entry.
+                    if let Some(slot) = subnets_to_discover.get(&subnet) {
+                        if subscription.slot > *slot {
+                            subnets_to_discover.insert(subnet, subscription.slot);
+                        }
+                    } else if !self.discovery_disabled {
+                        subnets_to_discover.insert(subnet, subscription.slot);
+                    }
+
+                    let exact_subnet = ExactSubnet {
+                        subnet,
+                        slot: subscription.slot,
+                    };
+
+                    // Determine if the validator is an aggregator. If so, we subscribe to the
+                    // subnet and, if successful, add the validator to a mapping of known
+                    // aggregators for that exact subnet.
+                    if subscription.is_aggregator {
+                        metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS);
+                        if let Err(e) = self.subscribe_to_subnet(exact_subnet) {
+                            warn!(self.log,
+                                "Subscription to subnet error";
+                                "error" => e,
+                            );
+                        }
+                    }
+                }
+                Subscription::SyncCommittee(subscription) => {
+                    metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS);
+                    // NOTE: We assume all subscriptions have been verified before reaching this
+                    // service.
+
+                    // Registers the validator with the subnet service.
+                    let subnet_ids =
+                        match SyncSubnetId::compute_subnets_for_sync_committee::<T::EthSpec>(
+                            &subscription.sync_committee_indices,
+                        ) {
+                            Ok(subnet_ids) => subnet_ids,
+                            Err(e) => {
+                                warn!(self.log,
+                                    "Failed to compute subnet id for sync committee subscription";
+                                    "error" => ?e,
+                                    "validator_index" => subscription.validator_index
+                                );
+                                continue;
+                            }
+                        };
+
+                    for subnet_id in subnet_ids {
+                        let subnet = Subnet::SyncCommittee(subnet_id);
+                        let slot_required_until = subscription
+                            .until_epoch
+                            .start_slot(T::EthSpec::slots_per_epoch());
+                        subnets_to_discover.insert(subnet, slot_required_until);
+
+                        let Some(duration_to_unsubscribe) = self
+                            .beacon_chain
+                            .slot_clock
+                            .duration_to_slot(slot_required_until)
+                        else {
+                            warn!(self.log, "Subscription to sync subnet error"; "error" => "Unable to determine duration to unsubscription slot", "validator_index" => subscription.validator_index);
+                            continue;
+                        };
+
+                        if duration_to_unsubscribe == Duration::from_secs(0) {
+                            let current_slot = self
+                                .beacon_chain
+                                .slot_clock
+                                .now()
+                                .unwrap_or(Slot::from(0u64));
+                            warn!(
+                                self.log,
+                                "Sync committee subscription is past expiration";
+                                "subnet" => ?subnet,
+                                "current_slot" => ?current_slot,
+                                "unsubscribe_slot" => ?slot_required_until,
+                            );
+                            continue;
+                        }
+
+                        self.subscribe_to_sync_subnet(
+                            subnet,
+                            duration_to_unsubscribe,
+                            slot_required_until,
+                        );
+                    }
+                }
+            }
+        }
+
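The `until_epoch` to `slot_required_until` conversion above, and the duration derived from it, are plain slot arithmetic. A sketch with mainnet-like constants (12s slots, 32-slot epochs); the real slot clock also accounts for the partially elapsed current slot:

```rust
use std::time::Duration;

const SLOTS_PER_EPOCH: u64 = 32;
const SLOT_DURATION: Duration = Duration::from_secs(12);

/// First slot of an epoch, as used for `slot_required_until` above.
fn start_slot(epoch: u64) -> u64 {
    epoch * SLOTS_PER_EPOCH
}

/// Idealised duration until a future slot; `None` for past slots.
fn duration_to_slot(current_slot: u64, target_slot: u64) -> Option<Duration> {
    target_slot
        .checked_sub(current_slot)
        .map(|slots| SLOT_DURATION * slots as u32)
}

fn main() {
    // Subscribed until epoch 10 (slot 320); at slot 300 that is 4 minutes away.
    assert_eq!(
        duration_to_slot(300, start_slot(10)),
        Some(Duration::from_secs(240))
    );
}
```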
+        // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the
+        // required subnets.
+        if !self.discovery_disabled {
+            if let Err(e) = self.discover_peers_request(subnets_to_discover.into_iter()) {
+                warn!(self.log, "Discovery lookup request error"; "error" => e);
+            };
+        }
+    }
+
+    /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip
+    /// verification, re-propagates and returns false.
+    pub fn should_process_attestation(
+        &self,
+        subnet: Subnet,
+        attestation: &Attestation<T::EthSpec>,
+    ) -> bool {
+        // Proposer-only mode does not need to process attestations
+        if self.proposer_only {
+            return false;
+        }
+        self.aggregate_validators_on_subnet
+            .as_ref()
+            .map(|tracked_vals| {
+                tracked_vals.contains_key(&ExactSubnet {
+                    subnet,
+                    slot: attestation.data().slot,
+                })
+            })
+            .unwrap_or(true)
+    }
+
+    /* Internal private functions */
+
+    /// Adds an event to the event queue and notifies that this service is ready to be polled
+    /// again.
+    fn queue_event(&mut self, ev: SubnetServiceMessage) {
+        self.events.push_back(ev);
+        if let Some(waker) = &self.waker {
+            waker.wake_by_ref()
+        }
+    }
+
+    /// Checks if there are currently queued discovery requests and the time required to make the
+    /// request.
+    ///
+    /// If there is sufficient time, queues a peer discovery request for all the required subnets.
+    // NOTE: Sending early subscriptions results in early searching for peers on subnets.
+    fn discover_peers_request(
+        &mut self,
+        subnets_to_discover: impl Iterator<Item = (Subnet, Slot)>,
+    ) -> Result<(), &'static str> {
+        let current_slot = self
+            .beacon_chain
+            .slot_clock
+            .now()
+            .ok_or("Could not get the current slot")?;
+
+        let discovery_subnets: Vec<SubnetDiscovery> = subnets_to_discover
+            .filter_map(|(subnet, relevant_slot)| {
+                // We generate discovery requests for all subnets (even ones we are permanently
+                // subscribed to) in order to ensure our peer counts are satisfactory to perform
+                // the necessary duties.
+
+                // Check if there is enough time to perform a discovery lookup.
+                if relevant_slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD)
+                {
+                    // Send out an event to start looking for peers.
+                    // Require the peer for an additional slot to ensure we keep the peer for the
+                    // duration of the subscription.
+                    let min_ttl = self
+                        .beacon_chain
+                        .slot_clock
+                        .duration_to_slot(relevant_slot + 1)
+                        .map(|duration| std::time::Instant::now() + duration);
+                    Some(SubnetDiscovery { subnet, min_ttl })
+                } else {
+                    // We may want to check the global PeerInfo to see estimated timeouts for each
+                    // peer before they can be removed.
+                    warn!(self.log,
+                        "Not enough time for a discovery search";
+                        "subnet_id" => ?subnet,
+                    );
+                    None
+                }
+            })
+            .collect();
+
+        if !discovery_subnets.is_empty() {
+            self.queue_event(SubnetServiceMessage::DiscoverPeers(discovery_subnets));
+        }
+        Ok(())
+    }
+
+    // Subscribes to the subnet if it should be done immediately, or schedules it if required.
+    fn subscribe_to_subnet(
+        &mut self,
+        ExactSubnet { subnet, slot }: ExactSubnet,
+    ) -> Result<(), &'static str> {
+        // If the subnet is one of our permanent subnets, we do not need to subscribe.
+        if self.subscribe_all_subnets || self.permanent_attestation_subscriptions.contains(&subnet)
+        {
+            return Ok(());
+        }
+
+        let slot_duration = self.beacon_chain.slot_clock.slot_duration();
+
+        // The short time we schedule the subscription before it's actually required. This
+        // ensures we are subscribed on time, and allows consecutive subscriptions to the same
+        // subnet to overlap, reducing subnet churn.
+        let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION;
+        // The time to the required slot.
+        let time_to_subscription_slot = self
+            .beacon_chain
+            .slot_clock
+            .duration_to_slot(slot)
+            .unwrap_or_default(); // If this is a past slot we will just get a 0 duration.
+
+        // Calculate how long before we need to subscribe to the subnet.
+        let time_to_subscription_start =
+            time_to_subscription_slot.saturating_sub(advance_subscription_duration);
+
+        // The time after a duty slot where we no longer need it in the
+        // `aggregate_validators_on_subnet` delay map.
+        let time_to_unsubscribe =
+            time_to_subscription_slot + UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY * slot_duration;
+        if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() {
+            tracked_vals.insert_at(ExactSubnet { subnet, slot }, time_to_unsubscribe);
+        }
+
+        // If the subscription should be done in the future, schedule it. Otherwise subscribe
+        // immediately.
+        if time_to_subscription_start.is_zero() {
+            // This is a current or past slot, we subscribe immediately.
+            self.subscribe_to_subnet_immediately(subnet, slot + 1)?;
+        } else {
+            // This is a future slot, schedule subscribing.
+            // We need to include the slot to make the key unique and prevent overwriting the
+            // entry for the same subnet.
+            self.scheduled_subscriptions
+                .insert_at(ExactSubnet { subnet, slot }, time_to_subscription_start);
+        }
+
+        Ok(())
+    }
+
+    /// Adds a subscription event for the given sync subnet.
+    fn subscribe_to_sync_subnet(
+        &mut self,
+        subnet: Subnet,
+        duration_to_unsubscribe: Duration,
+        slot_required_until: Slot,
+    ) {
+        // Return if we have subscribed to all subnets
+        if self.subscribe_all_subnets {
+            return;
+        }
+
+        // Update the unsubscription duration if we already have a subscription for the subnet
+        if let Some(current_instant_to_unsubscribe) = self.subscriptions.deadline(&subnet) {
+            // The extra 500ms in the comparison accounts for the inaccuracy of the underlying
+            // `DelayQueue` inside the delay map.
+            let current_duration_to_unsubscribe = (current_instant_to_unsubscribe
+                + Duration::from_millis(500))
+            .checked_duration_since(Instant::now())
+            .unwrap_or(Duration::from_secs(0));
+
+            if duration_to_unsubscribe > current_duration_to_unsubscribe {
+                self.subscriptions
+                    .update_timeout(&subnet, duration_to_unsubscribe);
+            }
+        } else {
+            // We are not currently subscribed and have no waiting subscription, so create one.
+            self.subscriptions
+                .insert_at(subnet, duration_to_unsubscribe);
+            debug!(self.log, "Subscribing to subnet"; "subnet" => ?subnet, "until" => ?slot_required_until);
+            self.events
+                .push_back(SubnetServiceMessage::Subscribe(subnet));
+
+            // Add the sync subnet to the ENR bitfield.
+            self.events.push_back(SubnetServiceMessage::EnrAdd(subnet));
+        }
+    }
+
+    /* A collection of functions that handle the various timeouts */
+
+    /// Registers a subnet as subscribed.
+    ///
+    /// Checks that the time at which the subscription would end is not in the past. If we are
+    /// already subscribed, extends the timeout if necessary. If this is a new subscription, we
+    /// send out the appropriate events.
+    fn subscribe_to_subnet_immediately(
+        &mut self,
+        subnet: Subnet,
+        end_slot: Slot,
+    ) -> Result<(), &'static str> {
+        if self.subscribe_all_subnets {
+            // Case not handled by this service.
+ return Ok(()); + } + + let time_to_subscription_end = self + .beacon_chain + .slot_clock + .duration_to_slot(end_slot) + .unwrap_or_default(); + + // First check this is worth doing. + if time_to_subscription_end.is_zero() { + return Err("Time when subscription would end has already passed."); + } + + // Check if we already have this subscription. If we do, optionally update the timeout of + // when we need the subscription, otherwise leave as is. + // If this is a new subscription simply add it to our mapping and subscribe. + match self.subscriptions.deadline(&subnet) { + Some(current_end_slot_time) => { + // We are already subscribed. Check if we need to extend the subscription. + if current_end_slot_time + .checked_duration_since(Instant::now()) + .unwrap_or(Duration::from_secs(0)) + < time_to_subscription_end + { + self.subscriptions + .update_timeout(&subnet, time_to_subscription_end); + } + } + None => { + // This is a new subscription. Add with the corresponding timeout and send the + // notification. + self.subscriptions + .insert_at(subnet, time_to_subscription_end); + + // Inform of the subscription. + debug!(self.log, "Subscribing to subnet"; + "subnet" => ?subnet, + "end_slot" => end_slot, + ); + self.queue_event(SubnetServiceMessage::Subscribe(subnet)); + } + } + Ok(()) + } + + // Unsubscribes from a subnet that was removed. + fn handle_removed_subnet(&mut self, subnet: Subnet) { + if !self.subscriptions.contains_key(&subnet) { + // Subscription no longer exists as short lived subnet + debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet); + self.queue_event(SubnetServiceMessage::Unsubscribe(subnet)); + + // If this is a sync subnet, we need to remove it from our ENR. + if let Subnet::SyncCommittee(sync_subnet_id) = subnet { + self.queue_event(SubnetServiceMessage::EnrRemove(sync_subnet_id)); + } + } + } +} + +impl Stream for SubnetService { + type Item = SubnetServiceMessage; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + // Update the waker if needed. + if let Some(waker) = &self.waker { + if waker.will_wake(cx.waker()) { + self.waker = Some(cx.waker().clone()); + } + } else { + self.waker = Some(cx.waker().clone()); + } + + // Send out any generated events. + if let Some(event) = self.events.pop_front() { + return Poll::Ready(Some(event)); + } + + // Process scheduled subscriptions that might be ready, since those can extend a soon to + // expire subscription. + match self.scheduled_subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(exact_subnet))) => { + let ExactSubnet { subnet, .. } = exact_subnet; + let current_slot = self.beacon_chain.slot_clock.now().unwrap_or_default(); + if let Err(e) = self.subscribe_to_subnet_immediately(subnet, current_slot + 1) { + debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet, "err" => e); + } + self.waker + .as_ref() + .expect("Waker has been set") + .wake_by_ref(); + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for scheduled subnet subscriptions"; "error"=> e); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // Process any expired subscriptions. 
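Both this arm and the scheduled-subscription arm above drain `delay_map` streams of expired keys. A self-contained sketch of that expiry pattern, assuming the same `delay_map` API already used in this module (`new`, `insert_at`, and a stream of `Result`-wrapped keys):

```rust
use delay_map::HashSetDelay;
use futures::StreamExt;
use std::time::Duration;

#[tokio::main]
async fn main() {
    // Default timeout of 60s, mirroring the service's use of a default
    // sync-committee duration for `subscriptions`.
    let mut subscriptions: HashSetDelay<u64> = HashSetDelay::new(Duration::from_secs(60));
    // Subnet 7 expires quickly; subnet 9 keeps a long deadline.
    subscriptions.insert_at(7, Duration::from_millis(10));
    subscriptions.insert_at(9, Duration::from_secs(60));

    // Nothing is yielded until a deadline fires; the first expired key is 7.
    if let Some(Ok(expired)) = subscriptions.next().await {
        assert_eq!(expired, 7);
    }
}
```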
+ match self.subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(subnet))) => { + self.handle_removed_subnet(subnet); + // We re-wake the task as there could be other subscriptions to process + self.waker + .as_ref() + .expect("Waker has been set") + .wake_by_ref(); + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // Poll to remove entries on expiration, no need to act on expiration events. + if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { + if let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) { + error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> e); + } + } + + Poll::Pending + } +} + /// Note: This `PartialEq` impl is for use only in tests. /// The `DiscoverPeers` comparison is good enough for testing only. #[cfg(test)] @@ -32,7 +681,6 @@ impl PartialEq for SubnetServiceMessage { (SubnetServiceMessage::Subscribe(a), SubnetServiceMessage::Subscribe(b)) => a == b, (SubnetServiceMessage::Unsubscribe(a), SubnetServiceMessage::Unsubscribe(b)) => a == b, (SubnetServiceMessage::EnrAdd(a), SubnetServiceMessage::EnrAdd(b)) => a == b, - (SubnetServiceMessage::EnrRemove(a), SubnetServiceMessage::EnrRemove(b)) => a == b, (SubnetServiceMessage::DiscoverPeers(a), SubnetServiceMessage::DiscoverPeers(b)) => { if a.len() != b.len() { return false; diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs deleted file mode 100644 index eda7ce8efbd..00000000000 --- a/beacon_node/network/src/subnet_service/sync_subnets.rs +++ /dev/null @@ -1,359 +0,0 @@ -//! This service keeps track of which sync committee subnet the beacon node should be subscribed to at any -//! given time. It schedules subscriptions to sync committee subnets and requests peer discoveries. - -use std::collections::{hash_map::Entry, HashMap, VecDeque}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::Duration; - -use futures::prelude::*; -use slog::{debug, error, o, trace, warn}; - -use super::SubnetServiceMessage; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use delay_map::HashSetDelay; -use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery}; -use slot_clock::SlotClock; -use types::{Epoch, EthSpec, SyncCommitteeSubscription, SyncSubnetId}; - -use crate::metrics; - -/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the -/// slot is less than this number, skip the peer discovery process. -/// Subnet discovery query takes at most 30 secs, 2 slots take 24s. -const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; - -/// A particular subnet at a given slot. -#[derive(PartialEq, Eq, Hash, Clone, Debug)] -pub struct ExactSubnet { - /// The `SyncSubnetId` associated with this subnet. - pub subnet_id: SyncSubnetId, - /// The epoch until which we need to stay subscribed to the subnet. - pub until_epoch: Epoch, -} -pub struct SyncCommitteeService { - /// Queued events to return to the driving service. - events: VecDeque, - - /// A reference to the beacon chain to process received attestations. - pub(crate) beacon_chain: Arc>, - - /// The collection of all currently subscribed subnets. - subscriptions: HashMap, - - /// A collection of timeouts for when to unsubscribe from a subnet. - unsubscriptions: HashSetDelay, - - /// The waker for the current thread. 
- waker: Option, - - /// The discovery mechanism of lighthouse is disabled. - discovery_disabled: bool, - - /// We are always subscribed to all subnets. - subscribe_all_subnets: bool, - - /// Whether this node is a block proposer-only node. - proposer_only: bool, - - /// The logger for the attestation service. - log: slog::Logger, -} - -impl SyncCommitteeService { - /* Public functions */ - - pub fn new( - beacon_chain: Arc>, - config: &NetworkConfig, - log: &slog::Logger, - ) -> Self { - let log = log.new(o!("service" => "sync_committee_service")); - - let spec = &beacon_chain.spec; - let epoch_duration_secs = - beacon_chain.slot_clock.slot_duration().as_secs() * T::EthSpec::slots_per_epoch(); - let default_timeout = - epoch_duration_secs.saturating_mul(spec.epochs_per_sync_committee_period.as_u64()); - - SyncCommitteeService { - events: VecDeque::with_capacity(10), - beacon_chain, - subscriptions: HashMap::new(), - unsubscriptions: HashSetDelay::new(Duration::from_secs(default_timeout)), - waker: None, - subscribe_all_subnets: config.subscribe_all_subnets, - discovery_disabled: config.disable_discovery, - proposer_only: config.proposer_only, - log, - } - } - - /// Return count of all currently subscribed subnets. - #[cfg(test)] - pub fn subscription_count(&self) -> usize { - use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; - if self.subscribe_all_subnets { - SYNC_COMMITTEE_SUBNET_COUNT as usize - } else { - self.subscriptions.len() - } - } - - /// Processes a list of sync committee subscriptions. - /// - /// This will: - /// - Search for peers for required subnets. - /// - Request subscriptions required subnets. - /// - Build the timeouts for each of these events. - /// - /// This returns a result simply for the ergonomics of using ?. The result can be - /// safely dropped. - pub fn validator_subscriptions( - &mut self, - subscriptions: Vec, - ) -> Result<(), String> { - // A proposer-only node does not subscribe to any sync-committees - if self.proposer_only { - return Ok(()); - } - - let mut subnets_to_discover = Vec::new(); - for subscription in subscriptions { - metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS); - //NOTE: We assume all subscriptions have been verified before reaching this service - - // Registers the validator with the subnet service. - // This will subscribe to long-lived random subnets if required. - trace!(self.log, - "Sync committee subscription"; - "subscription" => ?subscription, - ); - - let subnet_ids = match SyncSubnetId::compute_subnets_for_sync_committee::( - &subscription.sync_committee_indices, - ) { - Ok(subnet_ids) => subnet_ids, - Err(e) => { - warn!(self.log, - "Failed to compute subnet id for sync committee subscription"; - "error" => ?e, - "validator_index" => subscription.validator_index - ); - continue; - } - }; - - for subnet_id in subnet_ids { - let exact_subnet = ExactSubnet { - subnet_id, - until_epoch: subscription.until_epoch, - }; - subnets_to_discover.push(exact_subnet.clone()); - if let Err(e) = self.subscribe_to_subnet(exact_subnet.clone()) { - warn!(self.log, - "Subscription to sync subnet error"; - "error" => e, - "validator_index" => subscription.validator_index, - ); - } else { - trace!(self.log, - "Subscribed to subnet for sync committee duties"; - "exact_subnet" => ?exact_subnet, - "validator_index" => subscription.validator_index - ); - } - } - } - // If the discovery mechanism isn't disabled, attempt to set up a peer discovery for the - // required subnets. 
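For reference, `SyncSubnetId::compute_subnets_for_sync_committee` (used by both the old service here and the merged service above) reduces to splitting the sync committee into equal contiguous chunks. Simplified spec arithmetic with mainnet constants, not Lighthouse's implementation:

```rust
const SYNC_COMMITTEE_SIZE: u64 = 512;
const SYNC_COMMITTEE_SUBNET_COUNT: u64 = 4;

/// Simplified spec mapping from a validator's position in the sync committee
/// to a sync subnet: the committee splits into equal contiguous chunks.
fn subnet_for_sync_committee_index(index: u64) -> u64 {
    let subcommittee_size = SYNC_COMMITTEE_SIZE / SYNC_COMMITTEE_SUBNET_COUNT; // 128
    index / subcommittee_size
}

fn main() {
    assert_eq!(subnet_for_sync_committee_index(0), 0);
    assert_eq!(subnet_for_sync_committee_index(127), 0);
    assert_eq!(subnet_for_sync_committee_index(128), 1);
    assert_eq!(subnet_for_sync_committee_index(511), 3);
}
```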
- if !self.discovery_disabled { - if let Err(e) = self.discover_peers_request(subnets_to_discover.iter()) { - warn!(self.log, "Discovery lookup request error"; "error" => e); - }; - } - - // pre-emptively wake the thread to check for new events - if let Some(waker) = &self.waker { - waker.wake_by_ref(); - } - Ok(()) - } - - /* Internal private functions */ - - /// Checks if there are currently queued discovery requests and the time required to make the - /// request. - /// - /// If there is sufficient time, queues a peer discovery request for all the required subnets. - fn discover_peers_request<'a>( - &mut self, - exact_subnets: impl Iterator, - ) -> Result<(), &'static str> { - let current_slot = self - .beacon_chain - .slot_clock - .now() - .ok_or("Could not get the current slot")?; - - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - - let discovery_subnets: Vec = exact_subnets - .filter_map(|exact_subnet| { - let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch); - // check if there is enough time to perform a discovery lookup - if until_slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) { - // if the slot is more than epoch away, add an event to start looking for peers - // add one slot to ensure we keep the peer for the subscription slot - let min_ttl = self - .beacon_chain - .slot_clock - .duration_to_slot(until_slot + 1) - .map(|duration| std::time::Instant::now() + duration); - Some(SubnetDiscovery { - subnet: Subnet::SyncCommittee(exact_subnet.subnet_id), - min_ttl, - }) - } else { - // We may want to check the global PeerInfo to see estimated timeouts for each - // peer before they can be removed. - warn!(self.log, - "Not enough time for a discovery search"; - "subnet_id" => ?exact_subnet - ); - None - } - }) - .collect(); - - if !discovery_subnets.is_empty() { - self.events - .push_back(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); - } - Ok(()) - } - - /// Adds a subscription event and an associated unsubscription event if required. - fn subscribe_to_subnet(&mut self, exact_subnet: ExactSubnet) -> Result<(), &'static str> { - // Return if we have subscribed to all subnets - if self.subscribe_all_subnets { - return Ok(()); - } - - // Return if we already have a subscription for exact_subnet - if self.subscriptions.get(&exact_subnet.subnet_id) == Some(&exact_subnet.until_epoch) { - return Ok(()); - } - - // Return if we already have subscription set to expire later than the current request. - if let Some(until_epoch) = self.subscriptions.get(&exact_subnet.subnet_id) { - if *until_epoch >= exact_subnet.until_epoch { - return Ok(()); - } - } - - // initialise timing variables - let current_slot = self - .beacon_chain - .slot_clock - .now() - .ok_or("Could not get the current slot")?; - - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let until_slot = exact_subnet.until_epoch.end_slot(slots_per_epoch); - // Calculate the duration to the unsubscription event. - let expected_end_subscription_duration = if current_slot >= until_slot { - warn!( - self.log, - "Sync committee subscription is past expiration"; - "current_slot" => current_slot, - "exact_subnet" => ?exact_subnet, - ); - return Ok(()); - } else { - let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - - // the duration until we no longer need this subscription. We assume a single slot is - // sufficient. - self.beacon_chain - .slot_clock - .duration_to_slot(until_slot) - .ok_or("Unable to determine duration to unsubscription slot")? 
- + slot_duration - }; - - if let Entry::Vacant(e) = self.subscriptions.entry(exact_subnet.subnet_id) { - // We are not currently subscribed and have no waiting subscription, create one - debug!(self.log, "Subscribing to subnet"; "subnet" => *exact_subnet.subnet_id, "until_epoch" => ?exact_subnet.until_epoch); - e.insert(exact_subnet.until_epoch); - self.events - .push_back(SubnetServiceMessage::Subscribe(Subnet::SyncCommittee( - exact_subnet.subnet_id, - ))); - - // add the subnet to the ENR bitfield - self.events - .push_back(SubnetServiceMessage::EnrAdd(Subnet::SyncCommittee( - exact_subnet.subnet_id, - ))); - - // add an unsubscription event to remove ourselves from the subnet once completed - self.unsubscriptions - .insert_at(exact_subnet.subnet_id, expected_end_subscription_duration); - } else { - // We are already subscribed, extend the unsubscription duration - self.unsubscriptions - .update_timeout(&exact_subnet.subnet_id, expected_end_subscription_duration); - } - - Ok(()) - } - - /// A queued unsubscription is ready. - fn handle_unsubscriptions(&mut self, subnet_id: SyncSubnetId) { - debug!(self.log, "Unsubscribing from subnet"; "subnet" => *subnet_id); - - self.subscriptions.remove(&subnet_id); - self.events - .push_back(SubnetServiceMessage::Unsubscribe(Subnet::SyncCommittee( - subnet_id, - ))); - - self.events - .push_back(SubnetServiceMessage::EnrRemove(Subnet::SyncCommittee( - subnet_id, - ))); - } -} - -impl Stream for SyncCommitteeService { - type Item = SubnetServiceMessage; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // update the waker if needed - if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); - } - } else { - self.waker = Some(cx.waker().clone()); - } - - // process any un-subscription events - match self.unsubscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(exact_subnet))) => self.handle_unsubscriptions(exact_subnet), - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} - } - - // process any generated events - if let Some(event) = self.events.pop_front() { - return Poll::Ready(Some(event)); - } - - Poll::Pending - } -} diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index a784b05ea7a..7283b4af314 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -5,9 +5,9 @@ use beacon_chain::{ test_utils::get_kzg, BeaconChain, }; -use futures::prelude::*; use genesis::{generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::NetworkConfig; +use logging::test_logger; use slog::{o, Drain, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use slot_clock::{SlotClock, SystemTimeSlotClock}; @@ -21,6 +21,10 @@ use types::{ SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, }; +// Set to enable/disable logging +// const TEST_LOG_LEVEL: Option = Some(slog::Level::Debug); +const TEST_LOG_LEVEL: Option = None; + const SLOT_DURATION_MILLIS: u64 = 400; type TestBeaconChainType = Witness< @@ -42,7 +46,7 @@ impl TestBeaconChain { let keypairs = generate_deterministic_keypairs(1); - let log = get_logger(None); + let log = get_logger(TEST_LOG_LEVEL); let store = HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap(); @@ -114,15 +118,13 @@ fn 
get_logger(log_level: Option) -> Logger { static CHAIN: LazyLock = LazyLock::new(TestBeaconChain::new_with_system_clock); -fn get_attestation_service( - log_level: Option, -) -> AttestationService { - let log = get_logger(log_level); +fn get_subnet_service() -> SubnetService { + let log = test_logger(); let config = NetworkConfig::default(); let beacon_chain = CHAIN.chain.clone(); - AttestationService::new( + SubnetService::new( beacon_chain, lighthouse_network::discv5::enr::NodeId::random(), &config, @@ -130,15 +132,6 @@ fn get_attestation_service( ) } -fn get_sync_committee_service() -> SyncCommitteeService { - let log = get_logger(None); - let config = NetworkConfig::default(); - - let beacon_chain = CHAIN.chain.clone(); - - SyncCommitteeService::new(beacon_chain, &config, &log) -} - // gets a number of events from the subscription service, or returns none if it times out after a number // of slots async fn get_events + Unpin>( @@ -172,10 +165,10 @@ async fn get_events + Unpin>( events } -mod attestation_service { +mod test { #[cfg(not(windows))] - use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; + use crate::subnet_service::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; use super::*; @@ -184,13 +177,13 @@ mod attestation_service { slot: Slot, committee_count_at_slot: u64, is_aggregator: bool, - ) -> ValidatorSubscription { - ValidatorSubscription { + ) -> Subscription { + Subscription::Attestation(ValidatorSubscription { attestation_committee_index, slot, committee_count_at_slot, is_aggregator, - } + }) } fn get_subscriptions( @@ -198,7 +191,7 @@ mod attestation_service { slot: Slot, committee_count_at_slot: u64, is_aggregator: bool, - ) -> Vec { + ) -> Vec { (0..validator_count) .map(|validator_index| { get_subscription( @@ -215,72 +208,77 @@ mod attestation_service { async fn subscribe_current_slot_wait_for_unsubscribe() { // subscription config let committee_index = 1; - // Keep a low subscription slot so that there are no additional subnet discovery events. 
- let subscription_slot = 0; - let committee_count = 1; let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(None); - let current_slot = attestation_service + let mut subnet_service = get_subnet_service(); + let _events = get_events(&mut subnet_service, None, 1).await; + + let current_slot = subnet_service .beacon_chain .slot_clock .now() .expect("Could not get current slot"); + // Generate a subnet that isn't in our permanent subnet collection + let subscription_slot = current_slot + 1; + let mut committee_count = 1; + let mut subnet = Subnet::Attestation( + SubnetId::compute_subnet::( + subscription_slot, + committee_index, + committee_count, + &subnet_service.beacon_chain.spec, + ) + .unwrap(), + ); + while subnet_service + .permanent_subscriptions() + .any(|x| *x == subnet) + { + committee_count += 1; + subnet = Subnet::Attestation( + SubnetId::compute_subnet::( + subscription_slot, + committee_index, + committee_count, + &subnet_service.beacon_chain.spec, + ) + .unwrap(), + ); + } + let subscriptions = vec![get_subscription( committee_index, - current_slot + Slot::new(subscription_slot), + subscription_slot, committee_count, true, )]; // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions.into_iter()) - .unwrap(); + subnet_service.validator_subscriptions(subscriptions.into_iter()); // not enough time for peer discovery, just subscribe, unsubscribe - let subnet_id = SubnetId::compute_subnet::( - current_slot + Slot::new(subscription_slot), - committee_index, - committee_count, - &attestation_service.beacon_chain.spec, - ) - .unwrap(); let expected = [ - SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id)), - SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id)), + SubnetServiceMessage::Subscribe(subnet), + SubnetServiceMessage::Unsubscribe(subnet), ]; // Wait for 1 slot duration to get the unsubscription event let events = get_events( - &mut attestation_service, - Some(subnets_per_node * 3 + 2), - (MainnetEthSpec::slots_per_epoch() * 3) as u32, + &mut subnet_service, + Some(2), + (MainnetEthSpec::slots_per_epoch()) as u32, ) .await; - matches::assert_matches!( - events[..6], - [ - SubnetServiceMessage::Subscribe(_any1), - SubnetServiceMessage::EnrAdd(_any3), - SubnetServiceMessage::DiscoverPeers(_), - SubnetServiceMessage::Subscribe(_), - SubnetServiceMessage::EnrAdd(_), - SubnetServiceMessage::DiscoverPeers(_), - ] - ); + assert_eq!(events, expected); - // If the long lived and short lived subnets are the same, there should be no more events - // as we don't resubscribe already subscribed subnets. - if !attestation_service - .is_subscribed(&subnet_id, attestation_subnets::SubscriptionKind::LongLived) - { - assert_eq!(expected[..], events[subnets_per_node * 3..]); - } - // Should be subscribed to only subnets_per_node long lived subnet after unsubscription. - assert_eq!(attestation_service.subscription_count(), subnets_per_node); + // Should be subscribed to only subnets_per_node permanent subnets after unsubscription. + assert_eq!( + subnet_service.permanent_subscriptions().count(), + subnets_per_node + ); + assert_eq!(subnet_service.subscriptions().count(), 0); } /// Test to verify that we are not unsubscribing from a subnet before a required subscription.
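// The test above walks `committee_count` upward until the computed subnet id falls
// outside the permanent set. A sketch of the underlying mapping, per
// `compute_subnet_for_attestation` from the consensus p2p spec (which
// `SubnetId::compute_subnet` wraps); the mainnet constants are assumptions here.
const SLOTS_PER_EPOCH: u64 = 32;
const ATTESTATION_SUBNET_COUNT: u64 = 64;

fn compute_subnet_for_attestation(committees_per_slot: u64, slot: u64, committee_index: u64) -> u64 {
    let slots_since_epoch_start = slot % SLOTS_PER_EPOCH;
    let committees_since_epoch_start = committees_per_slot * slots_since_epoch_start;
    (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT
}

fn main() {
    // Bumping the committee count shifts the subnet id, which is how the test
    // steps to a subnet that is not permanently subscribed.
    assert_eq!(compute_subnet_for_attestation(1, 5, 1), 6);
    assert_eq!(compute_subnet_for_attestation(2, 5, 1), 11);
}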
@@ -289,7 +287,6 @@ mod attestation_service { async fn test_same_subnet_unsubscription() { // subscription config let committee_count = 1; - let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // Makes 2 validator subscriptions to the same subnet but at different slots. // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). @@ -298,9 +295,10 @@ mod attestation_service { let com1 = 1; let com2 = 0; - // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(None); - let current_slot = attestation_service + // create the subnet service and subscriptions + let mut subnet_service = get_subnet_service(); + let _events = get_events(&mut subnet_service, None, 0).await; + let current_slot = subnet_service .beacon_chain .slot_clock .now() @@ -324,7 +322,7 @@ mod attestation_service { current_slot + Slot::new(subscription_slot1), com1, committee_count, - &attestation_service.beacon_chain.spec, + &subnet_service.beacon_chain.spec, ) .unwrap(); @@ -332,7 +330,7 @@ mod attestation_service { current_slot + Slot::new(subscription_slot2), com2, committee_count, - &attestation_service.beacon_chain.spec, + &subnet_service.beacon_chain.spec, ) .unwrap(); @@ -341,110 +339,80 @@ mod attestation_service { assert_eq!(subnet_id1, subnet_id2); // submit the subscriptions - attestation_service - .validator_subscriptions(vec![sub1, sub2].into_iter()) - .unwrap(); + subnet_service.validator_subscriptions(vec![sub1, sub2].into_iter()); // Unsubscription event should happen at slot 2 (since subnet ids are the same, unsubscription event should be at higher slot + 1) - // Get all events for 1 slot duration (unsubscription event should happen after 2 slot durations). - let events = get_events(&mut attestation_service, None, 1).await; - matches::assert_matches!( - events[..3], - [ - SubnetServiceMessage::Subscribe(_any1), - SubnetServiceMessage::EnrAdd(_any3), - SubnetServiceMessage::DiscoverPeers(_), - ] - ); - let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); - // Should be still subscribed to 2 long lived and up to 1 short lived subnet if both are - // different. - if !attestation_service.is_subscribed( - &subnet_id1, - attestation_subnets::SubscriptionKind::LongLived, - ) { - // The index is 3*subnets_per_node (because we subscribe + discover + enr per long lived - // subnet) + 1 - let index = 3 * subnets_per_node; - assert_eq!(expected, events[index]); - assert_eq!( - attestation_service.subscription_count(), - subnets_per_node + 1 - ); + if subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + // If we are permanently subscribed to this subnet, we won't see a subscribe message + let _ = get_events(&mut subnet_service, None, 1).await; } else { - assert!(attestation_service.subscription_count() == subnets_per_node); + let subscription = get_events(&mut subnet_service, None, 1).await; + assert_eq!(subscription, [expected]); } // Get events for 1 more slot duration; we should get the unsubscribe event now. - let unsubscribe_event = get_events(&mut attestation_service, None, 1).await; + let unsubscribe_event = get_events(&mut subnet_service, None, 1).await; // If the long lived and short lived subnets are different, we should get an unsubscription // event.
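// A hypothetical model (not the crate's `HashSetDelay`) of why two subscriptions to the
// same subnet yield a single unsubscription event: the service keeps one deadline per
// subnet and only ever extends it.
use std::collections::HashMap;
use std::time::{Duration, Instant};

fn schedule_unsubscription(deadlines: &mut HashMap<u64, Instant>, subnet_id: u64, end: Instant) {
    deadlines
        .entry(subnet_id)
        .and_modify(|current| *current = (*current).max(end)) // extend, never shorten
        .or_insert(end);
}

fn main() {
    let mut deadlines = HashMap::new();
    let now = Instant::now();
    schedule_unsubscription(&mut deadlines, 7, now + Duration::from_secs(1));
    schedule_unsubscription(&mut deadlines, 7, now + Duration::from_secs(3));
    // One timer remains, set to the later deadline.
    assert_eq!(deadlines.len(), 1);
    assert_eq!(deadlines[&7], now + Duration::from_secs(3));
}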
- if !attestation_service.is_subscribed( - &subnet_id1, - attestation_subnets::SubscriptionKind::LongLived, - ) { - assert_eq!( - [SubnetServiceMessage::Unsubscribe(Subnet::Attestation( - subnet_id1 - ))], - unsubscribe_event[..] - ); + let expected = SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); + if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + assert_eq!([expected], unsubscribe_event[..]); } - // Should be subscribed 2 long lived subnet after unsubscription. - assert_eq!(attestation_service.subscription_count(), subnets_per_node); + // Should no longer be subscribed to any short lived subnets after unsubscription. + assert_eq!(subnet_service.subscriptions().count(), 0); } #[tokio::test] async fn subscribe_all_subnets() { let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count; let subscription_slot = 3; - let subscription_count = attestation_subnet_count; + let subscriptions_count = attestation_subnet_count; let committee_count = 1; let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(None); - let current_slot = attestation_service + let mut subnet_service = get_subnet_service(); + let current_slot = subnet_service .beacon_chain .slot_clock .now() .expect("Could not get current slot"); let subscriptions = get_subscriptions( - subscription_count, + subscriptions_count, current_slot + subscription_slot, committee_count, true, ); // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions.into_iter()) - .unwrap(); + subnet_service.validator_subscriptions(subscriptions.into_iter()); - let events = get_events(&mut attestation_service, Some(131), 10).await; + let events = get_events(&mut subnet_service, Some(130), 10).await; let mut discover_peer_count = 0; let mut enr_add_count = 0; - let mut unexpected_msg_count = 0; let mut unsubscribe_event_count = 0; + let mut subscription_event_count = 0; for event in &events { match event { SubnetServiceMessage::DiscoverPeers(_) => discover_peer_count += 1, - SubnetServiceMessage::Subscribe(_any_subnet) => {} + SubnetServiceMessage::Subscribe(_any_subnet) => subscription_event_count += 1, SubnetServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1, SubnetServiceMessage::Unsubscribe(_) => unsubscribe_event_count += 1, - _ => unexpected_msg_count += 1, + SubnetServiceMessage::EnrRemove(_) => {} } } - // There should be a Subscribe Event, and Enr Add event and a DiscoverPeers event for each - // long-lived subnet initially. The next event should be a bulk discovery event. - let bulk_discovery_index = 3 * subnets_per_node; + // There should be a Subscribe event and an Enr Add event for each + // permanent subnet initially. There is a single discovery event for the permanent + // subnets. + // The next event should be a bulk discovery event. + let bulk_discovery_index = subnets_per_node * 2 + 1; // The bulk discovery request length should be equal to validator_count let bulk_discovery_event = &events[bulk_discovery_index]; if let SubnetServiceMessage::DiscoverPeers(d) = bulk_discovery_event { @@ -455,14 +423,13 @@ mod attestation_service { // 64 `DiscoverPeer` requests of length 1 corresponding to deterministic subnets // and 1 `DiscoverPeer` request corresponding to bulk subnet discovery.
- assert_eq!(discover_peer_count, subnets_per_node + 1); - assert_eq!(attestation_service.subscription_count(), subnets_per_node); + assert_eq!(discover_peer_count, 1 + 1); + assert_eq!(subscription_event_count, attestation_subnet_count); assert_eq!(enr_add_count, subnets_per_node); assert_eq!( unsubscribe_event_count, attestation_subnet_count - subnets_per_node as u64 ); - assert_eq!(unexpected_msg_count, 0); // test completed successfully } @@ -473,30 +440,28 @@ mod attestation_service { let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; // the 65th subscription should result in no more messages than the previous scenario - let subscription_count = attestation_subnet_count + 1; + let subscriptions_count = attestation_subnet_count + 1; let committee_count = 1; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(None); - let current_slot = attestation_service + let mut subnet_service = get_subnet_service(); + let current_slot = subnet_service .beacon_chain .slot_clock .now() .expect("Could not get current slot"); let subscriptions = get_subscriptions( - subscription_count, + subscriptions_count, current_slot + subscription_slot, committee_count, true, ); // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions.into_iter()) - .unwrap(); + subnet_service.validator_subscriptions(subscriptions.into_iter()); - let events = get_events(&mut attestation_service, None, 3).await; + let events = get_events(&mut subnet_service, None, 3).await; let mut discover_peer_count = 0; let mut enr_add_count = 0; let mut unexpected_msg_count = 0; @@ -506,7 +471,10 @@ mod attestation_service { SubnetServiceMessage::DiscoverPeers(_) => discover_peer_count += 1, SubnetServiceMessage::Subscribe(_any_subnet) => {} SubnetServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1, - _ => unexpected_msg_count += 1, + _ => { + unexpected_msg_count += 1; + println!("{:?}", event); + } } } @@ -520,8 +488,8 @@ mod attestation_service { // subnets_per_node `DiscoverPeer` requests of length 1 corresponding to long-lived subnets // and 1 `DiscoverPeer` request corresponding to the bulk subnet discovery. - assert_eq!(discover_peer_count, subnets_per_node + 1); - assert_eq!(attestation_service.subscription_count(), subnets_per_node); + assert_eq!(discover_peer_count, 1 + 1); // Generates a single discovery for the permanent + // subscriptions and 1 for the new subscription assert_eq!(enr_add_count, subnets_per_node); assert_eq!(unexpected_msg_count, 0); } @@ -531,18 +499,23 @@ mod attestation_service { async fn test_subscribe_same_subnet_several_slots_apart() { // subscription config let committee_count = 1; - let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; - // Makes 2 validator subscriptions to the same subnet but at different slots. - // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). + // Makes 3 validator subscriptions to the same subnet but at different slots. + // There should be just 1 unsubscription event for each of the later slot subscriptions + // (subscription_slot2 and subscription_slot3).
let subscription_slot1 = 0; let subscription_slot2 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; + let subscription_slot3 = subscription_slot2 * 2; let com1 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; let com2 = 0; + let com3 = CHAIN.chain.spec.attestation_subnet_count - com1; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(None); - let current_slot = attestation_service + let mut subnet_service = get_subnet_service(); + // Remove permanent events + let _events = get_events(&mut subnet_service, None, 0).await; + + let current_slot = subnet_service .beacon_chain .slot_clock .now() @@ -562,11 +535,18 @@ mod attestation_service { true, ); + let sub3 = get_subscription( + com3, + current_slot + Slot::new(subscription_slot3), + committee_count, + true, + ); + let subnet_id1 = SubnetId::compute_subnet::( current_slot + Slot::new(subscription_slot1), com1, committee_count, - &attestation_service.beacon_chain.spec, + &subnet_service.beacon_chain.spec, ) .unwrap(); @@ -574,48 +554,48 @@ mod attestation_service { current_slot + Slot::new(subscription_slot2), com2, committee_count, - &attestation_service.beacon_chain.spec, + &subnet_service.beacon_chain.spec, + ) + .unwrap(); + + let subnet_id3 = SubnetId::compute_subnet::( + current_slot + Slot::new(subscription_slot3), + com3, + committee_count, + &subnet_service.beacon_chain.spec, ) .unwrap(); // Assert that subscriptions are different but their subnet is the same assert_ne!(sub1, sub2); + assert_ne!(sub1, sub3); + assert_ne!(sub2, sub3); assert_eq!(subnet_id1, subnet_id2); + assert_eq!(subnet_id1, subnet_id3); // submit the subscriptions - attestation_service - .validator_subscriptions(vec![sub1, sub2].into_iter()) - .unwrap(); + subnet_service.validator_subscriptions(vec![sub1, sub2, sub3].into_iter()); // Unsubscription event should happen at the end of the slot. 
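// Rough sketch of the shape of the tests' `get_events` helper (the exact signature is an
// assumption): drain the service's `Stream` until either `max_events` events arrive or
// the timeout elapses. Assumes `tokio` (with the `macros` and `time` features) and
// `futures` as dependencies.
use futures::{Stream, StreamExt};
use std::time::Duration;

async fn collect_events<S: Stream + Unpin>(
    stream: &mut S,
    max_events: usize,
    timeout: Duration,
) -> Vec<S::Item> {
    let mut events = Vec::with_capacity(max_events);
    let deadline = tokio::time::sleep(timeout);
    tokio::pin!(deadline);
    loop {
        tokio::select! {
            Some(event) = stream.next() => {
                events.push(event);
                if events.len() == max_events {
                    break;
                }
            }
            _ = &mut deadline => break, // timed out: return what we have
        }
    }
    events
}

#[tokio::main]
async fn main() {
    let mut stream = futures::stream::iter([1, 2, 3]);
    assert_eq!(collect_events(&mut stream, 2, Duration::from_millis(50)).await, vec![1, 2]);
}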
- let events = get_events(&mut attestation_service, None, 1).await; - matches::assert_matches!( - events[..3], - [ - SubnetServiceMessage::Subscribe(_any1), - SubnetServiceMessage::EnrAdd(_any3), - SubnetServiceMessage::DiscoverPeers(_), - ] - ); + // We wait for 2 slots to avoid timeout issues + let events = get_events(&mut subnet_service, None, 2).await; let expected_subscription = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); let expected_unsubscription = SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); - if !attestation_service.is_subscribed( - &subnet_id1, - attestation_subnets::SubscriptionKind::LongLived, - ) { - assert_eq!(expected_subscription, events[subnets_per_node * 3]); - assert_eq!(expected_unsubscription, events[subnets_per_node * 3 + 2]); + if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + assert_eq!(expected_subscription, events[0]); + assert_eq!(expected_unsubscription, events[2]); } - assert_eq!(attestation_service.subscription_count(), 2); + // Check that there are no more subscriptions + assert_eq!(subnet_service.subscriptions().count(), 0); println!("{events:?}"); let subscription_slot = current_slot + subscription_slot2 - 1; // one less due to the // advance subscription time - let wait_slots = attestation_service + let wait_slots = subnet_service .beacon_chain .slot_clock .duration_to_slot(subscription_slot) @@ -623,90 +603,68 @@ mod attestation_service { .as_millis() as u64 / SLOT_DURATION_MILLIS; - let no_events = dbg!(get_events(&mut attestation_service, None, wait_slots as u32).await); + let no_events = dbg!(get_events(&mut subnet_service, None, wait_slots as u32).await); assert_eq!(no_events, []); - let second_subscribe_event = get_events(&mut attestation_service, None, 2).await; - // If the long lived and short lived subnets are different, we should get an unsubscription event. - if !attestation_service.is_subscribed( - &subnet_id1, - attestation_subnets::SubscriptionKind::LongLived, - ) { + let second_subscribe_event = get_events(&mut subnet_service, None, 2).await; + // If the permanent and short lived subnets are different, we should get an unsubscription event. + if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { assert_eq!( - [SubnetServiceMessage::Subscribe(Subnet::Attestation( - subnet_id1 - ))], + [ + expected_subscription.clone(), + expected_unsubscription.clone(), + ], second_subscribe_event[..]
); } - } - #[tokio::test] - async fn test_update_deterministic_long_lived_subnets() { - let mut attestation_service = get_attestation_service(None); - let subnets_per_node = MainnetEthSpec::default_spec().subnets_per_node as usize; + let subscription_slot = current_slot + subscription_slot3 - 1; - let current_slot = attestation_service + let wait_slots = subnet_service .beacon_chain .slot_clock - .now() - .expect("Could not get current slot"); + .duration_to_slot(subscription_slot) + .unwrap() + .as_millis() as u64 + / SLOT_DURATION_MILLIS; - let subscriptions = get_subscriptions(20, current_slot, 30, false); + let no_events = dbg!(get_events(&mut subnet_service, None, wait_slots as u32).await); - // submit the subscriptions - attestation_service - .validator_subscriptions(subscriptions.into_iter()) - .unwrap(); - - // There should only be the same subscriptions as there are in the specification, - // regardless of subscriptions - assert_eq!( - attestation_service.long_lived_subscriptions().len(), - subnets_per_node - ); + assert_eq!(no_events, []); - let events = get_events(&mut attestation_service, None, 4).await; + let third_subscribe_event = get_events(&mut subnet_service, None, 2).await; - // Check that we attempt to subscribe and register ENRs - matches::assert_matches!( - events[..6], - [ - SubnetServiceMessage::Subscribe(_), - SubnetServiceMessage::EnrAdd(_), - SubnetServiceMessage::DiscoverPeers(_), - SubnetServiceMessage::Subscribe(_), - SubnetServiceMessage::EnrAdd(_), - SubnetServiceMessage::DiscoverPeers(_), - ] - ); + if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + assert_eq!( + [expected_subscription, expected_unsubscription], + third_subscribe_event[..] + ); + } } -} - -mod sync_committee_service { - use super::*; #[tokio::test] - async fn subscribe_and_unsubscribe() { + async fn subscribe_and_unsubscribe_sync_committee() { // subscription config let validator_index = 1; let until_epoch = Epoch::new(1); let sync_committee_indices = vec![1]; // create the attestation service and subscriptions - let mut sync_committee_service = get_sync_committee_service(); + let mut subnet_service = get_subnet_service(); + let _events = get_events(&mut subnet_service, None, 0).await; - let subscriptions = vec![SyncCommitteeSubscription { - validator_index, - sync_committee_indices: sync_committee_indices.clone(), - until_epoch, - }]; + let subscriptions = + std::iter::once(Subscription::SyncCommittee(SyncCommitteeSubscription { + validator_index, + sync_committee_indices: sync_committee_indices.clone(), + until_epoch, + })); // submit the subscriptions - sync_committee_service - .validator_subscriptions(subscriptions) - .unwrap(); + subnet_service.validator_subscriptions(subscriptions); + + // Remove permanent subscription events let subnet_ids = SyncSubnetId::compute_subnets_for_sync_committee::( &sync_committee_indices, @@ -716,7 +674,7 @@ mod sync_committee_service { // Note: the unsubscription event takes 2 epochs (8 * 2 * 0.4 secs = 3.2 secs) let events = get_events( - &mut sync_committee_service, + &mut subnet_service, Some(5), (MainnetEthSpec::slots_per_epoch() * 3) as u32, // Have some buffer time before getting 5 events ) @@ -738,7 +696,7 @@ mod sync_committee_service { ); // Should be unsubscribed at the end. 
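// Sketch of `compute_subnets_for_sync_committee` from the Altair p2p spec, which
// `SyncSubnetId::compute_subnets_for_sync_committee` implements: each sync committee
// position maps to one of four subnets. Mainnet constants are assumed.
use std::collections::HashSet;

const SYNC_COMMITTEE_SIZE: u64 = 512;
const SYNC_COMMITTEE_SUBNET_COUNT: u64 = 4;

fn compute_sync_subnets(sync_committee_indices: &[u64]) -> HashSet<u64> {
    let positions_per_subnet = SYNC_COMMITTEE_SIZE / SYNC_COMMITTEE_SUBNET_COUNT; // 128
    sync_committee_indices
        .iter()
        .map(|index| index / positions_per_subnet)
        .collect()
}

fn main() {
    // Position 1 (as used in the test above) maps to subnet 0.
    assert_eq!(compute_sync_subnets(&[1]), HashSet::from([0u64]));
    // Positions spread across the committee can map to several subnets.
    assert_eq!(compute_sync_subnets(&[1, 300]), HashSet::from([0u64, 2]));
}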
- assert_eq!(sync_committee_service.subscription_count(), 0); + assert_eq!(subnet_service.subscriptions().count(), 0); } #[tokio::test] @@ -749,21 +707,22 @@ mod sync_committee_service { let sync_committee_indices = vec![1]; // create the attestation service and subscriptions - let mut sync_committee_service = get_sync_committee_service(); + let mut subnet_service = get_subnet_service(); + // Get the initial events from permanent subnet subscriptions + let _events = get_events(&mut subnet_service, None, 1).await; - let subscriptions = vec![SyncCommitteeSubscription { - validator_index, - sync_committee_indices: sync_committee_indices.clone(), - until_epoch, - }]; + let subscriptions = + std::iter::once(Subscription::SyncCommittee(SyncCommitteeSubscription { + validator_index, + sync_committee_indices: sync_committee_indices.clone(), + until_epoch, + })); // submit the subscriptions - sync_committee_service - .validator_subscriptions(subscriptions) - .unwrap(); + subnet_service.validator_subscriptions(subscriptions); // Get all immediate events (won't include unsubscriptions) - let events = get_events(&mut sync_committee_service, None, 1).await; + let events = get_events(&mut subnet_service, None, 1).await; matches::assert_matches!( events[..], [ @@ -777,28 +736,30 @@ mod sync_committee_service { // Event 1 is a duplicate of an existing subscription // Event 2 is the same subscription with lower `until_epoch` than the existing subscription let subscriptions = vec![ - SyncCommitteeSubscription { + Subscription::SyncCommittee(SyncCommitteeSubscription { validator_index, sync_committee_indices: sync_committee_indices.clone(), until_epoch, - }, - SyncCommitteeSubscription { + }), + Subscription::SyncCommittee(SyncCommitteeSubscription { validator_index, sync_committee_indices: sync_committee_indices.clone(), until_epoch: until_epoch - 1, - }, + }), ]; // submit the subscriptions - sync_committee_service - .validator_subscriptions(subscriptions) - .unwrap(); + subnet_service.validator_subscriptions(subscriptions.into_iter()); // Get all immediate events (won't include unsubscriptions) - let events = get_events(&mut sync_committee_service, None, 1).await; + let events = get_events(&mut subnet_service, None, 1).await; matches::assert_matches!(events[..], [SubnetServiceMessage::DiscoverPeers(_),]); // Should still have a single sync committee subscription at the end. - assert_eq!(sync_committee_service.subscription_count(), 1); + let sync_committee_subscriptions = subnet_service + .subscriptions() + .filter(|s| matches!(s, Subnet::SyncCommittee(_))) + .count(); + assert_eq!(sync_committee_subscriptions, 1); } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index d701cbbb8d3..9bbd2bf295b 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -171,7 +171,10 @@ impl SingleBlockLookup { self.awaiting_parent.is_some() || self.block_request_state.state.is_awaiting_event() || match &self.component_requests { - ComponentRequests::WaitingForBlock => true, + // If components are waiting for the block request to complete, here we should + // check `block_request_state.state.is_awaiting_event()`. However, we already + // checked that above, so `WaitingForBlock => false` is equivalent.
+ ComponentRequests::WaitingForBlock => false, ComponentRequests::ActiveBlobRequest(request, _) => { request.state.is_awaiting_event() } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 344e91711c4..5d02be2b4c1 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -362,6 +362,16 @@ impl SyncManager { self.sampling.get_request_status(block_root, index) } + #[cfg(test)] + pub(crate) fn range_sync_state(&self) -> super::range_sync::SyncChainStatus { + self.range_sync.state() + } + + #[cfg(test)] + pub(crate) fn update_execution_engine_state(&mut self, state: EngineState) { + self.handle_new_execution_engine_state(state); + } + fn network_globals(&self) -> &NetworkGlobals { self.network.network_globals() } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index c4d987e8582..b6b7b315f3f 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -763,8 +763,7 @@ impl SyncNetworkContext { let requester = CustodyRequester(id); let mut request = ActiveCustodyRequest::new( block_root, - // TODO(das): req_id is duplicated here, also present in id - CustodyId { requester, req_id }, + CustodyId { requester }, &custody_indexes_to_fetch, self.log.clone(), ); diff --git a/beacon_node/network/src/sync/range_sync/block_storage.rs b/beacon_node/network/src/sync/range_sync/block_storage.rs deleted file mode 100644 index df49543a6b6..00000000000 --- a/beacon_node/network/src/sync/range_sync/block_storage.rs +++ /dev/null @@ -1,13 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use types::Hash256; - -/// Trait that helps maintain RangeSync's implementation split from the BeaconChain -pub trait BlockStorage { - fn is_block_known(&self, block_root: &Hash256) -> bool; -} - -impl BlockStorage for BeaconChain { - fn is_block_known(&self, block_root: &Hash256) -> bool { - self.block_is_known_to_fork_choice(block_root) - } -} diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 1217fbf8fed..c030d0a19e8 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -3,12 +3,11 @@ //! Each chain type is stored in it's own map. A variety of helper functions are given along with //! this struct to simplify the logic of the other layers of sync. -use super::block_storage::BlockStorage; use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::sync::network_context::SyncNetworkContext; -use beacon_chain::BeaconChainTypes; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use fnv::FnvHashMap; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; @@ -37,10 +36,13 @@ pub enum RangeSyncState { Idle, } +pub type SyncChainStatus = + Result, &'static str>; + /// A collection of finalized and head chains currently being processed. -pub struct ChainCollection { +pub struct ChainCollection { /// The beacon chain for processing. - beacon_chain: Arc, + beacon_chain: Arc>, /// The set of finalized chains being synced. finalized_chains: FnvHashMap>, /// The set of head chains being synced. 
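// A toy model of the `is_awaiting_event` check patched in the `single_block_lookup.rs`
// hunk above (names and shapes are assumptions of this sketch): because the block
// request's own `is_awaiting_event()` is OR-ed in before the match, mapping
// `WaitingForBlock` to `false` cannot change the overall result.
enum ComponentRequests {
    WaitingForBlock,
    ActiveBlobRequest(bool), // stands in for `request.state.is_awaiting_event()`
}

fn is_awaiting_event(block_request_awaiting_event: bool, components: &ComponentRequests) -> bool {
    block_request_awaiting_event
        || match components {
            ComponentRequests::WaitingForBlock => false, // already covered above
            ComponentRequests::ActiveBlobRequest(awaiting) => *awaiting,
        }
}

fn main() {
    // While components wait for the block, the block request itself is what
    // reports the pending event.
    assert!(is_awaiting_event(true, &ComponentRequests::WaitingForBlock));
    assert!(!is_awaiting_event(false, &ComponentRequests::WaitingForBlock));
}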
@@ -51,8 +53,8 @@ pub struct ChainCollection { log: slog::Logger, } -impl ChainCollection { - pub fn new(beacon_chain: Arc, log: slog::Logger) -> Self { +impl ChainCollection { + pub fn new(beacon_chain: Arc>, log: slog::Logger) -> Self { ChainCollection { beacon_chain, finalized_chains: FnvHashMap::default(), @@ -213,9 +215,7 @@ impl ChainCollection { } } - pub fn state( - &self, - ) -> Result, &'static str> { + pub fn state(&self) -> SyncChainStatus { match self.state { RangeSyncState::Finalized(ref syncing_id) => { let chain = self @@ -409,7 +409,8 @@ impl ChainCollection { let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { - target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root) + target_slot <= &local_finalized_slot + || beacon_chain.block_is_known_to_fork_choice(target_root) }; // Retain only head peers that remain relevant diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index d0f2f9217eb..8f881fba90f 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -2,7 +2,6 @@ //! peers. mod batch; -mod block_storage; mod chain; mod chain_collection; mod range; @@ -13,5 +12,7 @@ pub use batch::{ ByRangeRequestType, }; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; +#[cfg(test)] +pub use chain_collection::SyncChainStatus; pub use range::RangeSync; pub use sync_type::RangeSyncType; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 0ef99838dee..78679403bb4 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -39,9 +39,8 @@ //! Each chain is downloaded in batches of blocks. The batched blocks are processed sequentially //! and further batches are requested as current blocks are being processed. -use super::block_storage::BlockStorage; use super::chain::{BatchId, ChainId, RemoveChain, SyncingChain}; -use super::chain_collection::ChainCollection; +use super::chain_collection::{ChainCollection, SyncChainStatus}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::status::ToStatusMessage; @@ -56,7 +55,7 @@ use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use types::{Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, EthSpec, Hash256}; /// For how long we store failed finalized chains to prevent retries. const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; @@ -64,27 +63,26 @@ const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; /// The primary object dealing with long range/batch syncing. This contains all the active and /// non-active chains that need to be processed before the syncing is considered complete. This /// holds the current state of the long range sync. -pub struct RangeSync> { +pub struct RangeSync { /// The beacon chain for processing. - beacon_chain: Arc, + beacon_chain: Arc>, /// Last known sync info of our useful connected peers. We use this information to create Head /// chains after all finalized chains have ended. awaiting_head_peers: HashMap, /// A collection of chains that need to be downloaded. This stores any head or finalized chains /// that need to be downloaded. - chains: ChainCollection, + chains: ChainCollection, /// Chains that have failed and are stored to prevent being retried. failed_chains: LRUTimeCache, /// The syncing logger. 
log: slog::Logger, } -impl RangeSync +impl RangeSync where - C: BlockStorage + ToStatusMessage, T: BeaconChainTypes, { - pub fn new(beacon_chain: Arc, log: slog::Logger) -> Self { + pub fn new(beacon_chain: Arc>, log: slog::Logger) -> Self { RangeSync { beacon_chain: beacon_chain.clone(), chains: ChainCollection::new(beacon_chain, log.clone()), @@ -96,9 +94,7 @@ where } } - pub fn state( - &self, - ) -> Result, &'static str> { + pub fn state(&self) -> SyncChainStatus { self.chains.state() } @@ -382,465 +378,3 @@ where } } } - -#[cfg(test)] -mod tests { - use crate::network_beacon_processor::NetworkBeaconProcessor; - use crate::sync::SyncMessage; - use crate::NetworkMessage; - - use super::*; - use crate::sync::network_context::{BlockOrBlob, RangeRequestId}; - use beacon_chain::builder::Witness; - use beacon_chain::eth1_chain::CachingEth1Backend; - use beacon_chain::parking_lot::RwLock; - use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; - use beacon_chain::EngineState; - use beacon_processor::WorkEvent as BeaconWorkEvent; - use lighthouse_network::service::api_types::SyncRequestId; - use lighthouse_network::{ - rpc::StatusMessage, service::api_types::AppRequestId, NetworkConfig, NetworkGlobals, - }; - use slog::{o, Drain}; - use slot_clock::TestingSlotClock; - use std::collections::HashSet; - use store::MemoryStore; - use tokio::sync::mpsc; - use types::{FixedBytesExtended, ForkName, MinimalEthSpec as E}; - - #[derive(Debug)] - struct FakeStorage { - known_blocks: RwLock>, - status: RwLock, - } - - impl Default for FakeStorage { - fn default() -> Self { - FakeStorage { - known_blocks: RwLock::new(HashSet::new()), - status: RwLock::new(StatusMessage { - fork_digest: [0; 4], - finalized_root: Hash256::zero(), - finalized_epoch: 0usize.into(), - head_root: Hash256::zero(), - head_slot: 0usize.into(), - }), - } - } - } - - impl FakeStorage { - fn remember_block(&self, block_root: Hash256) { - self.known_blocks.write().insert(block_root); - } - - #[allow(dead_code)] - fn forget_block(&self, block_root: &Hash256) { - self.known_blocks.write().remove(block_root); - } - } - - impl BlockStorage for FakeStorage { - fn is_block_known(&self, block_root: &store::Hash256) -> bool { - self.known_blocks.read().contains(block_root) - } - } - - impl ToStatusMessage for FakeStorage { - fn status_message(&self) -> StatusMessage { - self.status.read().clone() - } - } - - type TestBeaconChainType = - Witness, E, MemoryStore, MemoryStore>; - - fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { - let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - - if enabled { - slog::Logger::root(drain.filter_level(level).fuse(), o!()) - } else { - slog::Logger::root(drain.filter(|_| false).fuse(), o!()) - } - } - - #[allow(unused)] - struct TestRig { - log: slog::Logger, - /// To check what does sync send to the beacon processor. - beacon_processor_rx: mpsc::Receiver>, - /// To set up different scenarios where sync is told about known/unknown blocks. - chain: Arc, - /// Needed by range to handle communication with the network. - cx: SyncNetworkContext, - /// To check what the network receives from Range. - network_rx: mpsc::UnboundedReceiver>, - /// To modify what the network declares about various global variables, in particular about - /// the sync state of a peer. 
- globals: Arc>, - } - - impl RangeSync { - fn assert_state(&self, expected_state: RangeSyncType) { - assert_eq!( - self.state() - .expect("State is ok") - .expect("Range is syncing") - .0, - expected_state - ) - } - - #[allow(dead_code)] - fn assert_not_syncing(&self) { - assert!( - self.state().expect("State is ok").is_none(), - "Range should not be syncing." - ); - } - } - - impl TestRig { - fn local_info(&self) -> SyncInfo { - let StatusMessage { - fork_digest: _, - finalized_root, - finalized_epoch, - head_root, - head_slot, - } = self.chain.status.read().clone(); - SyncInfo { - head_slot, - head_root, - finalized_epoch, - finalized_root, - } - } - - /// Reads an BlocksByRange request to a given peer from the network receiver channel. - #[track_caller] - fn grab_request( - &mut self, - expected_peer: &PeerId, - fork_name: ForkName, - ) -> (AppRequestId, Option) { - let block_req_id = if let Ok(NetworkMessage::SendRequest { - peer_id, - request: _, - request_id, - }) = self.network_rx.try_recv() - { - assert_eq!(&peer_id, expected_peer); - request_id - } else { - panic!("Should have sent a batch request to the peer") - }; - let blob_req_id = if fork_name.deneb_enabled() { - if let Ok(NetworkMessage::SendRequest { - peer_id, - request: _, - request_id, - }) = self.network_rx.try_recv() - { - assert_eq!(&peer_id, expected_peer); - Some(request_id) - } else { - panic!("Should have sent a batch request to the peer") - } - } else { - None - }; - (block_req_id, blob_req_id) - } - - fn complete_range_block_and_blobs_response( - &mut self, - block_req: AppRequestId, - blob_req_opt: Option, - ) -> (ChainId, BatchId, Id) { - if blob_req_opt.is_some() { - match block_req { - AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }) => { - let _ = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Block(None)); - let response = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Blob(None)) - .unwrap(); - let (chain_id, batch_id) = - TestRig::unwrap_range_request_id(response.sender_id); - (chain_id, batch_id, id) - } - other => panic!("unexpected request {:?}", other), - } - } else { - match block_req { - AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }) => { - let response = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Block(None)) - .unwrap(); - let (chain_id, batch_id) = - TestRig::unwrap_range_request_id(response.sender_id); - (chain_id, batch_id, id) - } - other => panic!("unexpected request {:?}", other), - } - } - } - - fn unwrap_range_request_id(sender_id: RangeRequestId) -> (ChainId, BatchId) { - if let RangeRequestId::RangeSync { chain_id, batch_id } = sender_id { - (chain_id, batch_id) - } else { - panic!("expected RangeSync request: {:?}", sender_id) - } - } - - /// Produce a head peer - fn head_peer( - &self, - ) -> ( - PeerId, - SyncInfo, /* Local info */ - SyncInfo, /* Remote info */ - ) { - let local_info = self.local_info(); - - // Get a peer with an advanced head - let head_root = Hash256::random(); - let head_slot = local_info.head_slot + 1; - let remote_info = SyncInfo { - head_root, - head_slot, - ..local_info - }; - let peer_id = PeerId::random(); - (peer_id, local_info, remote_info) - } - - fn finalized_peer( - &self, - ) -> ( - PeerId, - SyncInfo, /* Local info */ - SyncInfo, /* Remote info */ - ) { - let local_info = self.local_info(); - - let finalized_root = Hash256::random(); - let finalized_epoch = local_info.finalized_epoch + 2; - let head_slot = finalized_epoch.start_slot(E::slots_per_epoch()); - let head_root = 
Hash256::random(); - let remote_info = SyncInfo { - finalized_epoch, - finalized_root, - head_slot, - head_root, - }; - - let peer_id = PeerId::random(); - (peer_id, local_info, remote_info) - } - - #[track_caller] - fn expect_empty_processor(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - panic!( - "Expected empty processor. Instead got {}", - work.work_type_str() - ); - } - Err(e) => match e { - mpsc::error::TryRecvError::Empty => {} - mpsc::error::TryRecvError::Disconnected => unreachable!("bad coded test?"), - }, - } - } - - #[track_caller] - fn expect_chain_segment(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::WorkType::ChainSegment); - } - other => panic!("Expected chain segment process, found {:?}", other), - } - } - } - - fn range(log_enabled: bool) -> (TestRig, RangeSync) { - let log = build_log(slog::Level::Trace, log_enabled); - // Initialise a new beacon chain - let harness = BeaconChainHarness::>::builder(E) - .default_spec() - .logger(log.clone()) - .deterministic_keypairs(1) - .fresh_ephemeral_store() - .build(); - let chain = harness.chain; - - let fake_store = Arc::new(FakeStorage::default()); - let range_sync = RangeSync::::new( - fake_store.clone(), - log.new(o!("component" => "range")), - ); - let (network_tx, network_rx) = mpsc::unbounded_channel(); - let (sync_tx, _sync_rx) = mpsc::unbounded_channel::>(); - let network_config = Arc::new(NetworkConfig::default()); - let globals = Arc::new(NetworkGlobals::new_test_globals( - Vec::new(), - &log, - network_config, - chain.spec.clone(), - )); - let (network_beacon_processor, beacon_processor_rx) = - NetworkBeaconProcessor::null_for_testing( - globals.clone(), - sync_tx, - chain.clone(), - harness.runtime.task_executor.clone(), - log.clone(), - ); - let cx = SyncNetworkContext::new( - network_tx, - Arc::new(network_beacon_processor), - chain, - log.new(o!("component" => "network_context")), - ); - let test_rig = TestRig { - log, - beacon_processor_rx, - chain: fake_store, - cx, - network_rx, - globals, - }; - (test_rig, range_sync) - } - - #[test] - fn head_chain_removed_while_finalized_syncing() { - // NOTE: this is a regression test. - let (mut rig, mut range) = range(false); - - // Get a peer with an advanced head - let (head_peer, local_info, remote_info) = rig.head_peer(); - range.add_peer(&mut rig.cx, local_info, head_peer, remote_info); - range.assert_state(RangeSyncType::Head); - - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // Sync should have requested a batch, grab the request. - let _ = rig.grab_request(&head_peer, fork); - - // Now get a peer with an advanced finalized epoch. - let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - range.assert_state(RangeSyncType::Finalized); - - // Sync should have requested a batch, grab the request - let _ = rig.grab_request(&finalized_peer, fork); - - // Fail the head chain by disconnecting the peer. - range.remove_peer(&mut rig.cx, &head_peer); - range.assert_state(RangeSyncType::Finalized); - } - - #[test] - fn state_update_while_purging() { - // NOTE: this is a regression test. 
- let (mut rig, mut range) = range(true); - - // Get a peer with an advanced head - let (head_peer, local_info, head_info) = rig.head_peer(); - let head_peer_root = head_info.head_root; - range.add_peer(&mut rig.cx, local_info, head_peer, head_info); - range.assert_state(RangeSyncType::Head); - - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // Sync should have requested a batch, grab the request. - let _ = rig.grab_request(&head_peer, fork); - - // Now get a peer with an advanced finalized epoch. - let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - let finalized_peer_root = remote_info.finalized_root; - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - range.assert_state(RangeSyncType::Finalized); - - // Sync should have requested a batch, grab the request - let _ = rig.grab_request(&finalized_peer, fork); - - // Now the chain knows both chains target roots. - rig.chain.remember_block(head_peer_root); - rig.chain.remember_block(finalized_peer_root); - - // Add an additional peer to the second chain to make range update it's status - let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - } - - #[test] - fn pause_and_resume_on_ee_offline() { - let (mut rig, mut range) = range(true); - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // add some peers - let (peer1, local_info, head_info) = rig.head_peer(); - range.add_peer(&mut rig.cx, local_info, peer1, head_info); - let (block_req, blob_req_opt) = rig.grab_request(&peer1, fork); - - let (chain1, batch1, id1) = - rig.complete_range_block_and_blobs_response(block_req, blob_req_opt); - - // make the ee offline - rig.cx.update_execution_engine_state(EngineState::Offline); - - // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer1, chain1, batch1, id1, vec![]); - - // the beacon processor shouldn't have received any work - rig.expect_empty_processor(); - - // while the ee is offline, more peers might arrive. Add a new finalized peer. - let (peer2, local_info, finalized_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, peer2, finalized_info); - let (block_req, blob_req_opt) = rig.grab_request(&peer2, fork); - - let (chain2, batch2, id2) = - rig.complete_range_block_and_blobs_response(block_req, blob_req_opt); - - // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer2, chain2, batch2, id2, vec![]); - - // the beacon processor shouldn't have received any work - rig.expect_empty_processor(); - - // make the beacon processor available again. - rig.cx.update_execution_engine_state(EngineState::Online); - - // now resume range, we should have two processing requests in the beacon processor. - range.resume(&mut rig.cx); - - rig.expect_chain_segment(); - rig.expect_chain_segment(); - } -} diff --git a/beacon_node/network/src/sync/range_sync/sync_type.rs b/beacon_node/network/src/sync/range_sync/sync_type.rs index d6ffd4a5dfb..4ff7e393101 100644 --- a/beacon_node/network/src/sync/range_sync/sync_type.rs +++ b/beacon_node/network/src/sync/range_sync/sync_type.rs @@ -1,10 +1,9 @@ //! Contains logic about identifying which Sync to perform given PeerSyncInfo of ourselves and //! of a remote. 
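// Condensed sketch of the decision implemented by `RangeSyncType::new` in the hunk
// below: prefer a finalized sync when the remote's finalized checkpoint is ahead of
// ours and its finalized block is not already known to fork choice. Function and
// parameter names here are illustrative.
#[derive(Debug, PartialEq)]
enum RangeSyncType {
    Finalized,
    Head,
}

fn range_sync_type(
    local_finalized_epoch: u64,
    remote_finalized_epoch: u64,
    remote_finalized_root_known: bool, // block_is_known_to_fork_choice(...)
) -> RangeSyncType {
    if remote_finalized_epoch > local_finalized_epoch && !remote_finalized_root_known {
        RangeSyncType::Finalized
    } else {
        RangeSyncType::Head
    }
}

fn main() {
    assert_eq!(range_sync_type(1, 3, false), RangeSyncType::Finalized);
    // If we already imported their finalized block, only a head sync remains.
    assert_eq!(range_sync_type(1, 3, true), RangeSyncType::Head);
}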
+use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::SyncInfo; -use super::block_storage::BlockStorage; - /// The type of Range sync that should be done relative to our current state. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum RangeSyncType { @@ -17,8 +16,8 @@ pub enum RangeSyncType { impl RangeSyncType { /// Determines the type of sync given our local `PeerSyncInfo` and the remote's /// `PeerSyncInfo`. - pub fn new( - chain: &C, + pub fn new( + chain: &BeaconChain, local_info: &SyncInfo, remote_info: &SyncInfo, ) -> RangeSyncType { @@ -29,7 +28,7 @@ impl RangeSyncType { // not seen the finalized hash before. if remote_info.finalized_epoch > local_info.finalized_epoch - && !chain.is_block_known(&remote_info.finalized_root) + && !chain.block_is_known_to_fork_choice(&remote_info.finalized_root) { RangeSyncType::Finalized } else { diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 9f2c9ef66f0..94aacad3e81 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -83,6 +83,7 @@ impl TestRig { .logger(log.clone()) .deterministic_keypairs(1) .fresh_ephemeral_store() + .mock_execution_layer() .testing_slot_clock(TestingSlotClock::new( Slot::new(0), Duration::from_secs(0), @@ -144,7 +145,7 @@ impl TestRig { } } - fn test_setup() -> Self { + pub fn test_setup() -> Self { Self::test_setup_with_config(None) } @@ -168,11 +169,11 @@ impl TestRig { } } - fn log(&self, msg: &str) { + pub fn log(&self, msg: &str) { info!(self.log, "TEST_RIG"; "msg" => msg); } - fn after_deneb(&self) -> bool { + pub fn after_deneb(&self) -> bool { matches!(self.fork_name, ForkName::Deneb | ForkName::Electra) } @@ -238,7 +239,7 @@ impl TestRig { (parent, block, parent_root, block_root) } - fn send_sync_message(&mut self, sync_message: SyncMessage) { + pub fn send_sync_message(&mut self, sync_message: SyncMessage) { self.sync_manager.handle_message(sync_message); } @@ -369,7 +370,7 @@ impl TestRig { self.expect_empty_network(); } - fn new_connected_peer(&mut self) -> PeerId { + pub fn new_connected_peer(&mut self) -> PeerId { self.network_globals .peers .write() @@ -811,7 +812,7 @@ impl TestRig { } } - fn peer_disconnected(&mut self, peer_id: PeerId) { + pub fn peer_disconnected(&mut self, peer_id: PeerId) { self.send_sync_message(SyncMessage::Disconnect(peer_id)); } @@ -827,7 +828,7 @@ impl TestRig { } } - fn pop_received_network_event) -> Option>( + pub fn pop_received_network_event) -> Option>( &mut self, predicate_transform: F, ) -> Result { @@ -847,7 +848,7 @@ impl TestRig { } } - fn pop_received_processor_event) -> Option>( + pub fn pop_received_processor_event) -> Option>( &mut self, predicate_transform: F, ) -> Result { @@ -871,6 +872,16 @@ impl TestRig { } } + pub fn expect_empty_processor(&mut self) { + self.drain_processor_rx(); + if !self.beacon_processor_rx_queue.is_empty() { + panic!( + "Expected processor to be empty, but has events: {:?}", + self.beacon_processor_rx_queue + ); + } + } + fn find_block_lookup_request( &mut self, for_block: Hash256, @@ -2173,7 +2184,8 @@ fn custody_lookup_happy_path() { mod deneb_only { use super::*; use beacon_chain::{ - block_verification_types::RpcBlock, data_availability_checker::AvailabilityCheckError, + block_verification_types::{AsBlock, RpcBlock}, + data_availability_checker::AvailabilityCheckError, }; use ssz_types::VariableList; use std::collections::VecDeque; diff --git a/beacon_node/network/src/sync/tests/range.rs 
b/beacon_node/network/src/sync/tests/range.rs index 8b137891791..6faa8b72472 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -1 +1,273 @@ +use super::*; +use crate::status::ToStatusMessage; +use crate::sync::manager::SLOT_IMPORT_TOLERANCE; +use crate::sync::range_sync::RangeSyncType; +use crate::sync::SyncMessage; +use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; +use beacon_chain::EngineState; +use lighthouse_network::rpc::{RequestType, StatusMessage}; +use lighthouse_network::service::api_types::{AppRequestId, Id, SyncRequestId}; +use lighthouse_network::{PeerId, SyncInfo}; +use std::time::Duration; +use types::{EthSpec, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot}; +const D: Duration = Duration::new(0, 0); + +impl TestRig { + /// Produce a head peer with an advanced head + fn add_head_peer(&mut self) -> PeerId { + self.add_head_peer_with_root(Hash256::random()) + } + + /// Produce a head peer with an advanced head + fn add_head_peer_with_root(&mut self, head_root: Hash256) -> PeerId { + let local_info = self.local_info(); + self.add_peer(SyncInfo { + head_root, + head_slot: local_info.head_slot + 1 + Slot::new(SLOT_IMPORT_TOLERANCE as u64), + ..local_info + }) + } + + // Produce a finalized peer with an advanced finalized epoch + fn add_finalized_peer(&mut self) -> PeerId { + self.add_finalized_peer_with_root(Hash256::random()) + } + + // Produce a finalized peer with an advanced finalized epoch + fn add_finalized_peer_with_root(&mut self, finalized_root: Hash256) -> PeerId { + let local_info = self.local_info(); + let finalized_epoch = local_info.finalized_epoch + 2; + self.add_peer(SyncInfo { + finalized_epoch, + finalized_root, + head_slot: finalized_epoch.start_slot(E::slots_per_epoch()), + head_root: Hash256::random(), + }) + } + + fn local_info(&self) -> SyncInfo { + let StatusMessage { + fork_digest: _, + finalized_root, + finalized_epoch, + head_root, + head_slot, + } = self.harness.chain.status_message(); + SyncInfo { + head_slot, + head_root, + finalized_epoch, + finalized_root, + } + } + + fn add_peer(&mut self, remote_info: SyncInfo) -> PeerId { + // Create valid peer known to network globals + let peer_id = self.new_connected_peer(); + // Send peer to sync + self.send_sync_message(SyncMessage::AddPeer(peer_id, remote_info.clone())); + peer_id + } + + fn assert_state(&self, state: RangeSyncType) { + assert_eq!( + self.sync_manager + .range_sync_state() + .expect("State is ok") + .expect("Range should be syncing") + .0, + state, + "not expected range sync state" + ); + } + + #[track_caller] + fn expect_chain_segment(&mut self) { + self.pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::WorkType::ChainSegment).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expect ChainSegment work event: {e:?}")); + } + + fn update_execution_engine_state(&mut self, state: EngineState) { + self.log(&format!("execution engine state updated: {state:?}")); + self.sync_manager.update_execution_engine_state(state); + } + + fn find_blocks_by_range_request(&mut self, target_peer_id: &PeerId) -> (Id, Option) { + let block_req_id = self + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlocksByRange(_), + request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + } if peer_id == target_peer_id => Some(*id), + _ => None, + }) + .expect("Should have a blocks by range request"); + + let blob_req_id = if 
self.after_deneb() { + Some( + self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlobsByRange(_), + request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + } if peer_id == target_peer_id => Some(*id), + _ => None, + }) + .expect("Should have a blobs by range request"), + ) + } else { + None + }; + + (block_req_id, blob_req_id) + } + + fn find_and_complete_blocks_by_range_request(&mut self, target_peer_id: PeerId) { + let (blocks_req_id, blobs_req_id) = self.find_blocks_by_range_request(&target_peer_id); + + // Complete the request with a single stream termination + self.log(&format!( + "Completing BlocksByRange request {blocks_req_id} with empty stream" + )); + self.send_sync_message(SyncMessage::RpcBlock { + request_id: SyncRequestId::RangeBlockAndBlobs { id: blocks_req_id }, + peer_id: target_peer_id, + beacon_block: None, + seen_timestamp: D, + }); + + if let Some(blobs_req_id) = blobs_req_id { + // Complete the request with a single stream termination + self.log(&format!( + "Completing BlobsByRange request {blobs_req_id} with empty stream" + )); + self.send_sync_message(SyncMessage::RpcBlob { + request_id: SyncRequestId::RangeBlockAndBlobs { id: blobs_req_id }, + peer_id: target_peer_id, + blob_sidecar: None, + seen_timestamp: D, + }); + } + } + + async fn create_canonical_block(&mut self) -> SignedBeaconBlock { + self.harness.advance_slot(); + + let block_root = self + .harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness + .chain + .store + .get_full_block(&block_root) + .unwrap() + .unwrap() + } + + async fn remember_block(&mut self, block: SignedBeaconBlock) { + self.harness + .process_block(block.slot(), block.canonical_root(), (block.into(), None)) + .await + .unwrap(); + } +} + +#[test] +fn head_chain_removed_while_finalized_syncing() { + // NOTE: this is a regression test. + // Added in PR https://github.com/sigp/lighthouse/pull/2821 + let mut rig = TestRig::test_setup(); + + // Get a peer with an advanced head + let head_peer = rig.add_head_peer(); + rig.assert_state(RangeSyncType::Head); + + // Sync should have requested a batch, grab the request. + let _ = rig.find_blocks_by_range_request(&head_peer); + + // Now get a peer with an advanced finalized epoch. + let finalized_peer = rig.add_finalized_peer(); + rig.assert_state(RangeSyncType::Finalized); + + // Sync should have requested a batch, grab the request + let _ = rig.find_blocks_by_range_request(&finalized_peer); + + // Fail the head chain by disconnecting the peer. + rig.peer_disconnected(head_peer); + rig.assert_state(RangeSyncType::Finalized); +} + +#[tokio::test] +async fn state_update_while_purging() { + // NOTE: this is a regression test. + // Added in PR https://github.com/sigp/lighthouse/pull/2827 + let mut rig = TestRig::test_setup(); + + // Create blocks on a separate harness + let mut rig_2 = TestRig::test_setup(); + // Need to create blocks that can be inserted into the fork-choice and fit the "known + // conditions" below. 
+ let head_peer_block = rig_2.create_canonical_block().await; + let head_peer_root = head_peer_block.canonical_root(); + let finalized_peer_block = rig_2.create_canonical_block().await; + let finalized_peer_root = finalized_peer_block.canonical_root(); + + // Get a peer with an advanced head + let head_peer = rig.add_head_peer_with_root(head_peer_root); + rig.assert_state(RangeSyncType::Head); + + // Sync should have requested a batch, grab the request. + let _ = rig.find_blocks_by_range_request(&head_peer); + + // Now get a peer with an advanced finalized epoch. + let finalized_peer = rig.add_finalized_peer_with_root(finalized_peer_root); + rig.assert_state(RangeSyncType::Finalized); + + // Sync should have requested a batch, grab the request + let _ = rig.find_blocks_by_range_request(&finalized_peer); + + // Now the chain knows both chains' target roots. + rig.remember_block(head_peer_block).await; + rig.remember_block(finalized_peer_block).await; + + // Add an additional peer to the second chain to make range update its status + rig.add_finalized_peer(); +} + +#[test] +fn pause_and_resume_on_ee_offline() { + let mut rig = TestRig::test_setup(); + + // add some peers + let peer1 = rig.add_head_peer(); + // make the ee offline + rig.update_execution_engine_state(EngineState::Offline); + // send the response to the request + rig.find_and_complete_blocks_by_range_request(peer1); + // the beacon processor shouldn't have received any work + rig.expect_empty_processor(); + + // while the ee is offline, more peers might arrive. Add a new finalized peer. + let peer2 = rig.add_finalized_peer(); + + // send the response to the request + rig.find_and_complete_blocks_by_range_request(peer2); + // the beacon processor shouldn't have received any work + rig.expect_empty_processor(); + // make the beacon processor available again. + // update_execution_engine_state implicitly calls resume + // now resume range, we should have two processing requests in the beacon processor. + rig.update_execution_engine_state(EngineState::Online); + + rig.expect_chain_segment(); + rig.expect_chain_segment(); +} diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 4de9d351f3c..083c1170f07 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -105,7 +105,7 @@ impl SplitAttestation { } } -impl<'a, E: EthSpec> CompactAttestationRef<'a, E> { +impl<E: EthSpec> CompactAttestationRef<'_, E> { pub fn attestation_data(&self) -> AttestationData { AttestationData { slot: self.data.slot, diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 3a002bf8703..d01c73118c6 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -877,11 +877,11 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(1); // Only run this test on the phase0 hard-fork.
- if spec.altair_fork_epoch != None { + if spec.altair_fork_epoch.is_some() { return; } - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let slot = state.slot(); let committees = state .get_beacon_committees_at_slot(slot) @@ -902,10 +902,10 @@ mod release_tests { ); for (atts, aggregate) in &attestations { - let att2 = aggregate.as_ref().unwrap().message().aggregate().clone(); + let att2 = aggregate.as_ref().unwrap().message().aggregate(); let att1 = atts - .into_iter() + .iter() .map(|(att, _)| att) .take(2) .fold::>, _>(None, |att, new_att| { @@ -946,7 +946,7 @@ mod release_tests { .unwrap(); assert_eq!( - committees.get(0).unwrap().committee.len() - 2, + committees.first().unwrap().committee.len() - 2, earliest_attestation_validators( &att2_split.as_ref(), &state, @@ -963,7 +963,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(1); let op_pool = OperationPool::::new(); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let slot = state.slot(); let committees = state @@ -1020,7 +1020,7 @@ mod release_tests { let agg_att = &block_attestations[0]; assert_eq!( agg_att.num_set_aggregation_bits(), - spec.target_committee_size as usize + spec.target_committee_size ); // Prune attestations shouldn't do anything at this point. @@ -1039,7 +1039,7 @@ mod release_tests { fn attestation_duplicate() { let (harness, ref spec) = attestation_test_state::(1); - let state = get_current_state_initialize_epoch_cache(&harness, &spec); + let state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1082,7 +1082,7 @@ mod release_tests { fn attestation_pairwise_overlapping() { let (harness, ref spec) = attestation_test_state::(1); - let state = get_current_state_initialize_epoch_cache(&harness, &spec); + let state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1113,19 +1113,17 @@ mod release_tests { let aggs1 = atts1 .chunks_exact(step_size * 2) .map(|chunk| { - let agg = chunk.into_iter().map(|(att, _)| att).fold::, - >, _>( - None, - |att, new_att| { + let agg = chunk + .iter() + .map(|(att, _)| att) + .fold::>, _>(None, |att, new_att| { if let Some(mut a) = att { a.aggregate(new_att.to_ref()); Some(a) } else { Some(new_att.clone()) } - }, - ); + }); agg.unwrap() }) .collect::>(); @@ -1136,19 +1134,17 @@ mod release_tests { .as_slice() .chunks_exact(step_size * 2) .map(|chunk| { - let agg = chunk.into_iter().map(|(att, _)| att).fold::, - >, _>( - None, - |att, new_att| { + let agg = chunk + .iter() + .map(|(att, _)| att) + .fold::>, _>(None, |att, new_att| { if let Some(mut a) = att { a.aggregate(new_att.to_ref()); Some(a) } else { Some(new_att.clone()) } - }, - ); + }); agg.unwrap() }) .collect::>(); @@ -1181,7 +1177,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(num_committees); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1194,7 +1190,7 @@ mod release_tests { .collect::>(); let max_attestations = ::MaxAttestations::to_usize(); - let target_committee_size = spec.target_committee_size as usize; + let target_committee_size = spec.target_committee_size; let num_validators = num_committees * 
MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size; @@ -1209,12 +1205,12 @@ mod release_tests { let insert_attestations = |attestations: Vec<(Attestation, SubnetId)>, step_size| { - let att_0 = attestations.get(0).unwrap().0.clone(); + let att_0 = attestations.first().unwrap().0.clone(); let aggs = attestations .chunks_exact(step_size) .map(|chunk| { chunk - .into_iter() + .iter() .map(|(att, _)| att) .fold::, _>( att_0.clone(), @@ -1296,7 +1292,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(num_committees); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); let slot = state.slot(); @@ -1308,7 +1304,7 @@ mod release_tests { .collect::>(); let max_attestations = ::MaxAttestations::to_usize(); - let target_committee_size = spec.target_committee_size as usize; + let target_committee_size = spec.target_committee_size; // Each validator will have a multiple of 1_000_000_000 wei. // Safe from overflow unless there are about 18B validators (2^64 / 1_000_000_000). @@ -1329,12 +1325,12 @@ mod release_tests { let insert_attestations = |attestations: Vec<(Attestation, SubnetId)>, step_size| { - let att_0 = attestations.get(0).unwrap().0.clone(); + let att_0 = attestations.first().unwrap().0.clone(); let aggs = attestations .chunks_exact(step_size) .map(|chunk| { chunk - .into_iter() + .iter() .map(|(att, _)| att) .fold::, _>( att_0.clone(), @@ -1615,7 +1611,6 @@ mod release_tests { let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1674,7 +1669,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1711,7 +1705,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1791,7 +1784,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 7cee16c3535..21d0cf8dec8 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -5,34 +5,34 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } beacon_chain = { workspace = true } criterion = { workspace = true } rand = { workspace = true, features = ["small_rng"] } +tempfile = { workspace = true } [dependencies] +bls = { workspace = true } db-key = "0.0.5" -leveldb = { version = "0.8" } -parking_lot = { workspace = true } -itertools = { workspace = true } +directory = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -superstruct = { workspace = true } -types = { workspace = true } +itertools = { workspace = true } +leveldb = { version = "0.8" } +logging = { workspace = true } +lru = { workspace = true } +metrics = { workspace = true } +parking_lot = { workspace = 
true } safe_arith = { workspace = true } -state_processing = { workspace = true } -slog = { workspace = true } serde = { workspace = true } -metrics = { workspace = true } -lru = { workspace = true } +slog = { workspace = true } sloggers = { workspace = true } -directory = { workspace = true } +smallvec = { workspace = true } +state_processing = { workspace = true } strum = { workspace = true } +superstruct = { workspace = true } +types = { workspace = true } xdelta3 = { workspace = true } zstd = { workspace = true } -bls = { workspace = true } -smallvec = { workspace = true } -logging = { workspace = true } [[bench]] name = "hdiff" diff --git a/beacon_node/store/src/chunked_iter.rs b/beacon_node/store/src/chunked_iter.rs index b3322b5225d..8f6682e7581 100644 --- a/beacon_node/store/src/chunked_iter.rs +++ b/beacon_node/store/src/chunked_iter.rs @@ -56,7 +56,7 @@ where } } -impl<'a, F, E, Hot, Cold> Iterator for ChunkedVectorIter<'a, F, E, Hot, Cold> +impl Iterator for ChunkedVectorIter<'_, F, E, Hot, Cold> where F: Field, E: EthSpec, diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index e0f44f3affb..27769a310ac 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -149,8 +149,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for FrozenForwardsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for FrozenForwardsIterator<'_, E, Hot, Cold> { type Item = Result<(Hash256, Slot)>; @@ -349,8 +349,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for HybridForwardsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for HybridForwardsIterator<'_, E, Hot, Cold> { type Item = Result<(Hash256, Slot)>; diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 4942b148810..da3e6d4ebcb 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -2484,6 +2484,45 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } + /// Run a compaction pass on the freezer DB to free up space used by deleted states. + pub fn compact_freezer(&self) -> Result<(), Error> { + let current_schema_columns = vec![ + DBColumn::BeaconColdStateSummary, + DBColumn::BeaconStateSnapshot, + DBColumn::BeaconStateDiff, + DBColumn::BeaconStateRoots, + ]; + + // We can remove this once schema V21 has been gone for a while. + let previous_schema_columns = vec![ + DBColumn::BeaconState, + DBColumn::BeaconStateSummary, + DBColumn::BeaconBlockRootsChunked, + DBColumn::BeaconStateRootsChunked, + DBColumn::BeaconRestorePoint, + DBColumn::BeaconHistoricalRoots, + DBColumn::BeaconRandaoMixes, + DBColumn::BeaconHistoricalSummaries, + ]; + let mut columns = current_schema_columns; + columns.extend(previous_schema_columns); + + for column in columns { + info!( + self.log, + "Starting compaction"; + "column" => ?column + ); + self.cold_db.compact_column(column)?; + info!( + self.log, + "Finishing compaction"; + "column" => ?column + ); + } + Ok(()) + } + /// Return `true` if compaction on finalization/pruning is enabled. pub fn compact_on_prune(&self) -> bool { self.config.compact_on_prune @@ -2875,6 +2914,7 @@ impl, Cold: ItemStore> HotColdDB // // We can remove this once schema V21 has been gone for a while. 
let previous_schema_columns = vec![ + DBColumn::BeaconState, DBColumn::BeaconStateSummary, DBColumn::BeaconBlockRootsChunked, DBColumn::BeaconStateRootsChunked, @@ -2916,7 +2956,7 @@ impl, Cold: ItemStore> HotColdDB self.cold_db.do_atomically(cold_ops)?; // In order to reclaim space, we need to compact the freezer DB as well. - self.cold_db.compact()?; + self.compact_freezer()?; Ok(()) } diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 71dc96d99e9..97a88c01c82 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -53,8 +53,8 @@ pub struct StateRootsIterator<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore inner: RootsIterator<'a, E, Hot, Cold>, } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Clone - for StateRootsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Clone + for StateRootsIterator<'_, E, Hot, Cold> { fn clone(&self) -> Self { Self { @@ -77,8 +77,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> StateRootsIterator<' } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for StateRootsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for StateRootsIterator<'_, E, Hot, Cold> { type Item = Result<(Hash256, Slot), Error>; @@ -101,8 +101,8 @@ pub struct BlockRootsIterator<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore inner: RootsIterator<'a, E, Hot, Cold>, } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Clone - for BlockRootsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Clone + for BlockRootsIterator<'_, E, Hot, Cold> { fn clone(&self) -> Self { Self { @@ -136,8 +136,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> BlockRootsIterator<' } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for BlockRootsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for BlockRootsIterator<'_, E, Hot, Cold> { type Item = Result<(Hash256, Slot), Error>; @@ -155,9 +155,7 @@ pub struct RootsIterator<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> slot: Slot, } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Clone - for RootsIterator<'a, E, Hot, Cold> -{ +impl, Cold: ItemStore> Clone for RootsIterator<'_, E, Hot, Cold> { fn clone(&self) -> Self { Self { store: self.store, @@ -232,8 +230,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, E, } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for RootsIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for RootsIterator<'_, E, Hot, Cold> { /// (block_root, state_root, slot) type Item = Result<(Hash256, Hash256, Slot), Error>; @@ -295,8 +293,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for ParentRootBlockIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for ParentRootBlockIterator<'_, E, Hot, Cold> { type Item = Result<(Hash256, SignedBeaconBlock>), Error>; @@ -336,8 +334,8 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> BlockIterator<'a, E, } } -impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator - for BlockIterator<'a, E, Hot, Cold> +impl, Cold: ItemStore> Iterator + for BlockIterator<'_, E, Hot, Cold> { type Item = Result>, Error>; diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 0498c7c1e2c..09ae9a32dd0 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -332,7 +332,7 @@ pub enum DBColumn { BeaconRandaoMixes, #[strum(serialize = "dht")] DhtEnrs, - /// For 
Optimistically Imported Merge Transition Blocks + /// DEPRECATED. For Optimistically Imported Merge Transition Blocks #[strum(serialize = "otb")] OptimisticTransitionBlock, /// DEPRECATED. Can be removed once schema v22 is buried by a hard fork. diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 2eb40f47b18..22eecdcc605 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -136,7 +136,7 @@ where pub earliest_consolidation_epoch: Epoch, #[superstruct(only(Electra))] - pub pending_balance_deposits: List, + pub pending_deposits: List, #[superstruct(only(Electra))] pub pending_partial_withdrawals: List, @@ -403,7 +403,7 @@ impl TryInto> for PartialBeaconState { earliest_exit_epoch, consolidation_balance_to_consume, earliest_consolidation_epoch, - pending_balance_deposits, + pending_deposits, pending_partial_withdrawals, pending_consolidations ], diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index afb93f3657d..546cc2ed41c 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } [dependencies] beacon_chain = { workspace = true } -slot_clock = { workspace = true } -tokio = { workspace = true } slog = { workspace = true } +slot_clock = { workspace = true } task_executor = { workspace = true } +tokio = { workspace = true } diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index c38ee58e3b0..44d7702e5ff 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -33,9 +33,8 @@ * [Signature Header](./api-vc-sig-header.md) * [Prometheus Metrics](./advanced_metrics.md) * [Lighthouse UI (Siren)](./lighthouse-ui.md) - * [Installation](./ui-installation.md) - * [Authentication](./ui-authentication.md) * [Configuration](./ui-configuration.md) + * [Authentication](./ui-authentication.md) * [Usage](./ui-usage.md) * [FAQs](./ui-faqs.md) * [Advanced Usage](./advanced.md) @@ -66,3 +65,4 @@ * [Development Environment](./setup.md) * [FAQs](./faq.md) * [Protocol Developers](./developers.md) +* [Security Researchers](./security.md) diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index d8d6ea61a18..b558279730e 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -56,7 +56,7 @@ that we have observed are: _a lot_ of space. It's even possible to push beyond that with `--hierarchy-exponents 0` which would store a full state every single slot (NOT RECOMMENDED). - **Less diff layers are not necessarily faster**. One might expect that the fewer diff layers there - are, the less work Lighthouse would have to do to reconstruct any particular state. In practise + are, the less work Lighthouse would have to do to reconstruct any particular state. In practice this seems to be offset by the increased size of diffs in each layer making the diffs take longer to apply. We observed no significant performance benefit from `--hierarchy-exponents 5,7,11`, and a substantial increase in space consumed. diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 732b4f51e65..c0f6b5485ef 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -68,7 +68,7 @@ The steps to do port forwarding depends on the router, but the general steps are 1. 
Determine the default gateway IP: - On Linux: open a terminal and run `ip route | grep default`, the result should look something similar to `default via 192.168.50.1 dev wlp2s0 proto dhcp metric 600`. The `192.168.50.1` is your router management default gateway IP. - - On MacOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. + - On macOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. - On Windows: open a command prompt and run `ipconfig` and look for the `Default Gateway` which will show you the gateway IP. The default gateway IP usually looks like 192.168.X.X. Once you obtain the IP, enter it to a web browser and it will lead you to the router management page. @@ -91,7 +91,7 @@ The steps to do port forwarding depends on the router, but the general steps are - Internal port: `9001` - IP address: Choose the device that is running Lighthouse. -1. To check that you have successfully opened the ports, go to [yougetsignal](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly setup port forwarding for the UDP ports (`9000` and `9001` by default). +1. To check that you have successfully opened the ports, go to [`yougetsignal`](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly set up port forwarding for the UDP ports (`9000` and `9001` by default). ## ENR Configuration @@ -141,7 +141,7 @@ To listen over both IPv4 and IPv6: - Set two listening addresses using the `--listen-address` flag twice ensuring the two addresses are one IPv4, and the other IPv6. When doing so, the `--port` and `--discovery-port` flags will apply exclusively to IPv4. Note - that this behaviour differs from the Ipv6 only case described above. + that this behaviour differs from the IPv6 only case described above. - If necessary, set the `--port6` flag to configure the port used for TCP and UDP over IPv6. This flag has no effect when listening over IPv6 only.
- If necessary, set the `--discovery-port6` flag to configure the IPv6 UDP diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index b63505c4901..5428ab8f9ae 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -508,23 +508,31 @@ curl "http://localhost:5052/lighthouse/database/info" | jq ```json { - "schema_version": 18, + "schema_version": 22, "config": { - "slots_per_restore_point": 8192, - "slots_per_restore_point_set_explicitly": false, "block_cache_size": 5, + "state_cache_size": 128, + "compression_level": 1, "historic_state_cache_size": 1, + "hdiff_buffer_cache_size": 16, "compact_on_init": false, "compact_on_prune": true, "prune_payloads": true, + "hierarchy_config": { + "exponents": [ + 5, + 7, + 11 + ] + }, "prune_blobs": true, "epochs_per_blob_prune": 1, "blob_prune_margin_epochs": 0 }, "split": { - "slot": "7454656", - "state_root": "0xbecfb1c8ee209854c611ebc967daa77da25b27f1a8ef51402fdbe060587d7653", - "block_root": "0x8730e946901b0a406313d36b3363a1b7091604e1346a3410c1a7edce93239a68" + "slot": "10530592", + "state_root": "0xd27e6ce699637cf9b5c7ca632118b7ce12c2f5070bb25a27ac353ff2799d4466", + "block_root": "0x71509a1cb374773d680cd77148c73ab3563526dacb0ab837bb0c87e686962eae" }, "anchor": { "anchor_slot": "7451168", @@ -543,8 +551,19 @@ curl "http://localhost:5052/lighthouse/database/info" | jq For more information about the split point, see the [Database Configuration](./advanced_database.md) docs. -The `anchor` will be `null` unless the node has been synced with checkpoint sync and state -reconstruction has yet to be completed. For more information +For archive nodes, the `anchor` will be: + +```json +"anchor": { + "anchor_slot": "0", + "oldest_block_slot": "0", + "oldest_block_parent": "0x0000000000000000000000000000000000000000000000000000000000000000", + "state_upper_limit": "0", + "state_lower_limit": "0" + }, +``` + +indicating that all states with slots `>= 0` are available, i.e., full state history. For more information on the specific meanings of these fields see the docs on [Checkpoint Sync](./checkpoint-sync.md#reconstructing-states). diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md index adde78270a6..f792ee870e0 100644 --- a/book/src/api-vc-auth-header.md +++ b/book/src/api-vc-auth-header.md @@ -18,12 +18,13 @@ Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh ## Obtaining the API token The API token is stored as a file in the `validators` directory. For most users -this is `~/.lighthouse/{network}/validators/api-token.txt`. Here's an -example using the `cat` command to print the token to the terminal, but any +this is `~/.lighthouse/{network}/validators/api-token.txt`, unless overridden using the +`--http-token-path` CLI parameter. Here's an +example using the `cat` command to print the token for mainnet to the terminal, but any text editor will suffice: ```bash -cat api-token.txt +cat ~/.lighthouse/mainnet/validators/api-token.txt hGut6B8uEujufDXSmZsT0thnxvdvKFBvh ``` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 80eba7a0590..98605a3dcd0 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -53,7 +53,7 @@ Example Response Body: } ``` -> Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/api-token.txt`. If your database is saved in another directory, modify the `DATADIR` accordingly. 
If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. +> Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/api-token.txt`. If your database is saved in another directory, modify the `DATADIR` accordingly. If you've specified a custom token path using `--http-token-path`, use that path instead. If you are having permission issues with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. > As an alternative, you can also provide the API token directly, for example, `-H "Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh`. In this case, you obtain the token from the file `api-token.txt` and the command becomes: diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 6d75b901004..a9bfb00ccda 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -16,6 +16,7 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| +| v6.0.0 | Nov 2024 | v22 | no | | v5.3.0 | Aug 2024 | v21 | yes | | v5.2.0 | Jun 2024 | v19 | no | | v5.1.0 | Mar 2024 | v19 | no | @@ -208,6 +209,7 @@ Here are the steps to prune historic states: | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|-------------------------------------| +| v6.0.0 | Nov 2024 | v22 | no | | v5.3.0 | Aug 2024 | v21 | yes | | v5.2.0 | Jun 2024 | v19 | yes before Deneb using <= v5.2.1 | | v5.1.0 | Mar 2024 | v19 | yes before Deneb using <= v5.2.1 | diff --git a/book/src/faq.md b/book/src/faq.md index 04e5ce5bc8f..d23951c8c77 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -92,7 +92,7 @@ If the reason for the error message is caused by no. 1 above, you may want to lo - Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services. - The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates. -- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. To confirm that the error is due to oom, run `sudo dmesg -T | grep killed` to look for killed processes. If you are using geth as the execution client, a short term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer. +- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. To confirm that the error is due to oom, run `sudo dmesg -T | grep killed` to look for killed processes. If you are using Geth as the execution client, a short-term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long-term solution is to increase the memory capacity of the computer.
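As a quick sketch of the oom check and mitigation described above (the Geth invocation is illustrative only; in practice you would add the flag to the command line or service file you already use):

```bash
# Look for processes terminated by the kernel's OOM killer,
# e.g. the execution client:
sudo dmesg -T | grep killed

# Short-term mitigation if Geth was killed: reduce its cache allowance.
# Illustrative only; append the flag to your existing Geth command line.
geth --cache 2048
```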
### I see beacon logs showing `Error during execution engine upcheck`, what should I do? @@ -302,7 +302,7 @@ An example of the log: (debug logs can be found under `$datadir/beacon/logs`): Delayed head block, set_as_head_time_ms: 27, imported_time_ms: 168, attestable_delay_ms: 4209, available_delay_ms: 4186, execution_time_ms: 201, blob_delay_ms: 3815, observed_delay_ms: 3984, total_delay_ms: 4381, slot: 1886014, proposer_index: 733, block_root: 0xa7390baac88d50f1cbb5ad81691915f6402385a12521a670bbbd4cd5f8bf3934, service: beacon, module: beacon_chain::canonical_head:1441 ``` -The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s which has past the window of attestation, the attestation wil fail. In the above example, the delay is mostly caused by late block observed by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest the block due to the block arriving late. +The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s, which is past the attestation window, the attestation will fail. In the above example, the delay is mostly caused by the block being observed late by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest the block due to the block arriving late. Another example of log: @@ -315,7 +315,7 @@ In this example, we see that the `execution_time_ms` is 4694ms. The `execution_t ### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? -In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone elses performance. +In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone else's performance. You could also check for the sync aggregate participation percentage on block explorers such as [beaconcha.in](https://beaconcha.in/). A low sync aggregate participation percentage (e.g., 60-70%) indicates that the block that you are assigned to attest to may be published late. As a result, your validator fails to correctly attest to the block. diff --git a/book/src/graffiti.md b/book/src/graffiti.md index ba9c7d05d70..7b402ea866f 100644 --- a/book/src/graffiti.md +++ b/book/src/graffiti.md @@ -4,7 +4,7 @@ Lighthouse provides four options for setting validator graffiti. ## 1.
Using the "--graffiti-file" flag on the validator client -Users can specify a file with the `--graffiti-file` flag. This option is useful for dynamically changing graffitis for various use cases (e.g. drawing on the beaconcha.in graffiti wall). This file is loaded once on startup and reloaded everytime a validator is chosen to propose a block. +Users can specify a file with the `--graffiti-file` flag. This option is useful for dynamically changing graffitis for various use cases (e.g. drawing on the beaconcha.in graffiti wall). This file is loaded once on startup and reloaded every time a validator is chosen to propose a block. Usage: `lighthouse vc --graffiti-file graffiti_file.txt` diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 2cfbfbc857a..71e21d68c91 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -69,6 +69,10 @@ Options: this server (e.g., http://localhost:5062). --http-port Set the listen TCP port for the RESTful HTTP API server. + --http-token-path + Path to file containing the HTTP API token for validator client + authentication. If not specified, defaults to + {validators-dir}/api-token.txt. --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] diff --git a/book/src/homebrew.md b/book/src/homebrew.md index da92dcb26ce..f94764889e6 100644 --- a/book/src/homebrew.md +++ b/book/src/homebrew.md @@ -31,6 +31,6 @@ Alternatively, you can find the `lighthouse` binary at: The [formula][] is kept up-to-date by the Homebrew community and a bot that lists for new releases. -The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/l/lighthouse.rb) repo. +The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/l/lighthouse.rb) repository. [formula]: https://formulae.brew.sh/formula/lighthouse diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md index 4a00f33aa44..fca156bda3f 100644 --- a/book/src/late-block-re-orgs.md +++ b/book/src/late-block-re-orgs.md @@ -46,24 +46,31 @@ You can track the reasons for re-orgs being attempted (or not) via Lighthouse's A pair of messages at `INFO` level will be logged if a re-org opportunity is detected: -> INFO Attempting re-org due to weak head threshold_weight: 45455983852725, head_weight: 0, parent: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, weak_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 - -> INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 +```text +INFO Attempting re-org due to weak head threshold_weight: 45455983852725, head_weight: 0, parent: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, weak_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 +``` This should be followed shortly after by a `INFO` log indicating that a re-org occurred. 
This is expected and normal: -> INFO Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +```text +INFO Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +``` In case a re-org is not viable (which should be most of the time), Lighthouse will just propose a block as normal and log the reason the re-org was not attempted at debug level: -> DEBG Not attempting re-org reason: head not late +```text +DEBG Not attempting re-org reason: head not late +``` If you are interested in digging into the timing of `forkchoiceUpdated` messages sent to the execution layer, there is also a debug log for the suppression of `forkchoiceUpdated` messages when Lighthouse thinks that a re-org is likely: -> DEBG Fork choice update overridden slot: 1105320, override: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, canonical_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +```text +DEBG Fork choice update overridden slot: 1105320, override: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, canonical_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +``` [the spec]: https://github.com/ethereum/consensus-specs/pull/3034 diff --git a/book/src/lighthouse-ui.md b/book/src/lighthouse-ui.md index 106a5e89472..f2662f4a69a 100644 --- a/book/src/lighthouse-ui.md +++ b/book/src/lighthouse-ui.md @@ -21,7 +21,6 @@ The UI is currently in active development. It resides in the See the following Siren specific topics for more context-specific information: -- [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI. - [Configuration Guide](./ui-configuration.md) - Explanation of how to setup and configure Siren. - [Authentication Guide](./ui-authentication.md) - Explanation of how Siren authentication works and protects validator actions. diff --git a/book/src/resources/2020-lh-trail-of-bits.pdf b/book/src/resources/2020-lh-trail-of-bits.pdf new file mode 100644 index 00000000000..162bef53f05 Binary files /dev/null and b/book/src/resources/2020-lh-trail-of-bits.pdf differ diff --git a/book/src/security.md b/book/src/security.md new file mode 100644 index 00000000000..0af57db7f9d --- /dev/null +++ b/book/src/security.md @@ -0,0 +1,11 @@ +# Security + +Lighthouse takes security seriously. Please see our security policy on GitHub for our PGP key and information on reporting vulnerabilities: + +- [GitHub: Security Policy](https://github.com/sigp/lighthouse/blob/stable/SECURITY.md) + +## Past Security Assessments + +Reports from previous security assessments can be found below: + +- [December 2020 - Trail of Bits](./resources/2020-lh-trail-of-bits.pdf) diff --git a/book/src/ui-authentication.md b/book/src/ui-authentication.md index 9e3a94db78d..81b867bae26 100644 --- a/book/src/ui-authentication.md +++ b/book/src/ui-authentication.md @@ -2,12 +2,12 @@ ## Siren Session -For enhanced security, Siren will require users to authenticate with their session password to access the dashboard. This is crucial because Siren now includes features that can permanently alter the status of user validators. 
The session password must be set during the [installation](./ui-installation.md) process before running the Docker or local build, either in an `.env` file or via Docker flags. +For enhanced security, Siren will require users to authenticate with their session password to access the dashboard. This is crucial because Siren now includes features that can permanently alter the status of the user's validators. The session password must be set during the [configuration](./ui-configuration.md) process before running the Docker or local build, either in an `.env` file or via Docker flags. ![exit](imgs/ui-session.png) ## Protected Actions -Prior to executing any sensitive validator action, Siren will request authentication of the session password. If you wish to update your password please refer to the Siren [installation process](./ui-installation.md). +Prior to executing any sensitive validator action, Siren will request authentication of the session password. If you wish to update your password please refer to the Siren [configuration process](./ui-configuration.md). ![exit](imgs/ui-auth.png) diff --git a/book/src/ui-configuration.md b/book/src/ui-configuration.md index eeb2c9a51cd..34cc9fe7ca6 100644 --- a/book/src/ui-configuration.md +++ b/book/src/ui-configuration.md @@ -1,37 +1,116 @@ -# Configuration +# 📦 Installation -Siren requires a connection to both a Lighthouse Validator Client and a Lighthouse Beacon Node. -To enable connection, you must generate .env file based on the provided .env.example +Siren supports any operating system that supports containers and/or NodeJS 18; this includes Linux, macOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren). + +## Version Requirement + +To ensure proper functionality, the Siren app requires Lighthouse v4.3.0 or higher. You can find these versions on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. -## Connecting to the Clients + +## Configuration + +Siren requires a connection to both a Lighthouse Validator Client and a Lighthouse Beacon Node. Both the Beacon node and the Validator client need to have their HTTP APIs enabled. -These ports should be accessible from Siren. +These ports should be accessible from Siren. This means adding the flag `--http` on both the beacon node and the validator client. To enable the HTTP API for the beacon node, utilize the `--gui` CLI flag. This action ensures that the HTTP API can be accessed by other software on the same machine. > The Beacon Node must be run with the `--gui` flag set. -If you require accessibility from another machine within the network, configure the `--http-address` to match the local LAN IP of the system running the Beacon Node and Validator Client. +## Running the Docker container (Recommended) + +We recommend running Siren's container next to your beacon node (on the same server), as it's essentially a webapp that you can access with any browser. + + 1. Create a directory to run Siren: + + ```bash + cd ~ + mkdir Siren + cd Siren + ``` + + 1. Create a configuration file in the `Siren` directory: `nano .env` and insert the following fields into the `.env` file. The field values are given here as an example; modify the fields as necessary.
For example, the `API_TOKEN` can be obtained from [`Validator Client Authorization Header`](./api-vc-auth-header.md) + + A full example with all possible configuration options can be found [here](https://github.com/sigp/siren/blob/stable/.env.example). + + ``` + BEACON_URL=http://localhost:5052 + VALIDATOR_URL=http://localhost:5062 + API_TOKEN=R6YhbDO6gKjNMydtZHcaCovFbQ0izq5Hk + SESSION_PASSWORD=your_password + ``` + + 1. You can now start Siren with: + + ```bash + docker run --rm -ti --name siren --env-file $PWD/.env --net host sigp/siren + ``` + + Note that, due to the `--net=host` flag, this will expose Siren on ports 3000, 80, and 443. Preferably, only the latter should be accessible. Adjust your firewall and/or skip the flag wherever possible. + + If it fails to start, an error message will be shown. For example, the error + + ``` + http://localhost:5062 unreachable, check settings and connection + ``` + + means that the validator client is not running, or the `--http` flag is not provided, or that it is otherwise inaccessible from within the container. Another common error is: + + ``` + validator api issue, server response: 403 + ``` + + which means that the API token is incorrect. Check that you have provided the correct token in the field `API_TOKEN` in `.env`. + + When Siren has started successfully, you should see the log `LOG [NestApplication] Nest application successfully started +118ms`. + + 1. Siren is now accessible at `https://` (when used with `--net=host`). You will get a warning about an invalid certificate; this can be safely ignored. + + > Note: We recommend setting a strong password when running Siren to protect it from unauthorized access. + +Advanced users can mount their own certificates or disable SSL altogether; see the `SSL Certificates` section below. + +## Building From Source + +### Docker + +The docker image can be built with the following command: +`docker build -f Dockerfile -t siren .` + +### Building locally + +To build from source, ensure that your system has `Node v18.18` and `yarn` installed. + +#### Build and run the backend + +Navigate to the backend directory with `cd backend`. Install all required Node packages by running `yarn`. Once the installation is complete, compile the backend with `yarn build`. Deploy the backend in a production environment with `yarn start:production`. This ensures optimal performance. + +#### Build and run the frontend + +After initializing the backend, return to the root directory. Install all frontend dependencies by executing `yarn`. Build the frontend using `yarn build`. Start the frontend production server with `yarn start`. + +This will allow you to access Siren at `http://localhost:3000` by default. + +## Advanced configuration + +### About self-signed SSL certificates + +By default, Siren runs internally on port 80 (plain, behind nginx), port 3000 (plain, direct), and port 443 (with SSL, behind nginx). Siren will generate and use a self-signed certificate on startup. This will generate a security warning when you try to access the interface. We recommend only disabling SSL if you access Siren over a local LAN or an otherwise highly trusted or encrypted network (e.g. a VPN). + +#### Generating persistent SSL certificates and installing them to your system -> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`.
When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. +[mkcert](https://github.com/FiloSottile/mkcert) is a tool that makes it super easy to generate a self-signed certificate that is trusted by your browser. -In a similar manner, the validator client requires activation of the `--http` flag, along with the optional consideration of configuring the `--http-address` flag. If `--http-address` flag is set on the Validator Client, then the `--unencrypted-http-transport` flag is required as well. These settings will ensure compatibility with Siren's connectivity requirements. +To use it for `siren`, install it following the instructions. Then, run `mkdir certs; mkcert -cert-file certs/cert.pem -key-file certs/key.pem 127.0.0.1 localhost` (add or replace any IP or hostname that you would use to access it at the end of this command). +To use these generated certificates, add this to your `docker run` command: `-v $PWD/certs:/certs` -If you run the Docker container, it will fail to startup if your BN/VC are not accessible, or if you provided a wrong API token. +The nginx SSL config inside Siren's container expects 3 files: `/certs/cert.pem`, `/certs/key.pem` and `/certs/key.pass`. If `/certs/cert.pem` does not exist, it will generate a self-signed certificate as mentioned above. If `/certs/cert.pem` does exist, it will attempt to use your provided or persisted certificates. -## API Token +### Configuration through environment variables -The API Token is a secret key that allows you to connect to the validator -client. The validator client's HTTP API is guarded by this key because it -contains sensitive validator information and the ability to modify -validators. Please see [`Validator Authorization`](./api-vc-auth-header.md) -for further details. +For those who prefer to use environment variables to configure Siren instead of using an `.env` file, this is fully supported. In some cases this may even be preferred. -Siren requires this token in order to connect to the Validator client. -The token is located in the default data directory of the validator -client. The default path is -`~/.lighthouse//validators/api-token.txt`. +#### Docker installed through `snap` -The contents of this file for the desired validator client needs to be -entered. +If you installed Docker through a snap (i.e. on Ubuntu), Docker will have trouble accessing the `.env` file. In this case it is highly recommended to pass the config to the container with environment variables. +Note that the defaults in `.env.example` will be used as a fallback if no other value is provided. diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md index efa6d3d4ab2..29de889e5fc 100644 --- a/book/src/ui-faqs.md +++ b/book/src/ui-faqs.md @@ -6,19 +6,20 @@ Yes, the most current Siren version requires Lighthouse v4.3.0 or higher to func ## 2. Where can I find my API token? -The required Api token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md). +The required API token may be found in the default data directory of the validator client. For more information, please refer to the Lighthouse UI configuration [`api token section`](./api-vc-auth-header.md). ## 3. How do I fix the Node Network Errors?
-If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). +If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR, you can refer to the Lighthouse UI [`configuration`](./ui-configuration.md#configuration). ## 4. How do I connect Siren to Lighthouse from a different computer on the same network? -Siren is a webapp, you can access it like any other website. We don't recommend exposing it to the internet; if you require remote access a VPN or (authenticated) reverse proxy is highly recommended. +Siren is a webapp; you can access it like any other website. We don't recommend exposing it to the internet; if you require remote access, a VPN or (authenticated) reverse proxy is highly recommended. +That being said, it is entirely possible to publish it over the internet. How to do that goes well beyond the scope of this document, but we want to emphasize once more the need for *at least* SSL encryption if you choose to do so. ## 5. How can I use Siren to monitor my validators remotely when I am not at home? -Most contemporary home routers provide options for VPN access in various ways. A VPN permits a remote computer to establish a connection with internal computers within a home network. With a VPN configuration in place, connecting to the VPN enables you to treat your computer as if it is part of your local home network. The connection process involves following the setup steps for connecting via another machine on the same network on the Siren configuration page and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). +Most contemporary home routers provide options for VPN access in various ways. A VPN permits a remote computer to establish a connection with internal computers within a home network. With a VPN configuration in place, connecting to the VPN enables you to treat your computer as if it is part of your local home network. The connection process involves following the setup steps for connecting via another machine on the same network on the Siren configuration page and [`configuration`](./ui-configuration.md#configuration). ## 6. Does Siren support reverse proxy or DNS named addresses? diff --git a/book/src/ui-installation.md b/book/src/ui-installation.md index 1444c0d6331..9cd84e5160b 100644 --- a/book/src/ui-installation.md +++ b/book/src/ui-installation.md @@ -1,6 +1,6 @@ # 📦 Installation -Siren supports any operating system that supports container runtimes and/or NodeJS 18, this includes Linux, MacOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren) , but running the application directly is also possible. +Siren supports any operating system that supports containers and/or NodeJS 18; this includes Linux, macOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren), but running the application directly is also possible. ## Version Requirement diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index 092c813a1ea..eef563dcdb7 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -56,7 +56,6 @@ The following fields are returned: able to vote) during the current epoch.
- `current_epoch_target_attesting_gwei`: the total staked gwei that attested to the majority-elected Casper FFG target epoch during the current epoch. -- `previous_epoch_active_gwei`: as per `current_epoch_active_gwei`, but during the previous epoch. - `previous_epoch_target_attesting_gwei`: see `current_epoch_target_attesting_gwei`. - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a head beacon block that is in the canonical chain. diff --git a/book/src/validator-manager.md b/book/src/validator-manager.md index a71fab1e3ad..11df2af0378 100644 --- a/book/src/validator-manager.md +++ b/book/src/validator-manager.md @@ -32,3 +32,4 @@ The `validator-manager` boasts the following features: - [Creating and importing validators using the `create` and `import` commands.](./validator-manager-create.md) - [Moving validators between two VCs using the `move` command.](./validator-manager-move.md) +- [Managing validators such as delete, import and list validators.](./validator-manager-api.md) diff --git a/book/src/validator-monitoring.md b/book/src/validator-monitoring.md index 6439ea83a32..bbc95460ec9 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator-monitoring.md @@ -134,7 +134,7 @@ validator_monitor_attestation_simulator_source_attester_hit_total validator_monitor_attestation_simulator_source_attester_miss_total ``` -A grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). +A Grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). The attestation simulator provides an insight into the attestation performance of a beacon node. It can be used as an indication of how expediently the beacon node has completed importing blocks within the 4s time frame for an attestation to be made. 
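As an aside, the hit/miss counters above reduce to a simple hit rate, `hits / (hits + misses)`. A minimal illustrative Rust sketch (not part of this diff; the metrics endpoint, metric values, and parsing helper are all assumptions) of deriving it from scraped Prometheus text:

```rust
/// Extract the value of a Prometheus counter from text-format metrics.
/// Hypothetical helper, not part of Lighthouse.
fn counter(metrics: &str, name: &str) -> Option<f64> {
    metrics
        .lines()
        .find(|line| line.starts_with(name))
        .and_then(|line| line.split_whitespace().last())
        .and_then(|value| value.parse().ok())
}

fn main() {
    // In practice this text would be scraped from the beacon node's metrics
    // endpoint (by default http://localhost:5054/metrics); values are made up.
    let scraped = "validator_monitor_attestation_simulator_source_attester_hit_total 285\n\
                   validator_monitor_attestation_simulator_source_attester_miss_total 15\n";

    let hit_metric = "validator_monitor_attestation_simulator_source_attester_hit_total";
    let miss_metric = "validator_monitor_attestation_simulator_source_attester_miss_total";
    let hits = counter(scraped, hit_metric).unwrap_or(0.0);
    let misses = counter(scraped, miss_metric).unwrap_or(0.0);

    // A persistently low hit rate indicates blocks are often imported too
    // late for a timely source vote within the 4s attestation window.
    println!("source hit rate: {:.1}%", 100.0 * hits / (hits + misses));
}
```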
diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 76d41ae11a8..7c8d2b16fd4 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,24 +1,24 @@ [package] name = "boot_node" -version = "5.3.0" +version = "6.0.1" authors = ["Sigma Prime "] edition = { workspace = true } [dependencies] beacon_node = { workspace = true } +bytes = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } -lighthouse_network = { workspace = true } -types = { workspace = true } +eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } -slog = { workspace = true } -tokio = { workspace = true } +hex = { workspace = true } +lighthouse_network = { workspace = true } log = { workspace = true } -slog-term = { workspace = true } logging = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } slog-async = { workspace = true } slog-scope = "4.3.0" -hex = { workspace = true } -serde = { workspace = true } -eth2_network_config = { workspace = true } -bytes = { workspace = true } +slog-term = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index e66bf14233a..dece975d37e 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -3,20 +3,19 @@ name = "account_utils" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = { workspace = true } -eth2_wallet = { workspace = true } +directory = { workspace = true } eth2_keystore = { workspace = true } +eth2_wallet = { workspace = true } filesystem = { workspace = true } -zeroize = { workspace = true } +rand = { workspace = true } +regex = { workspace = true } +rpassword = "5.0.0" serde = { workspace = true } serde_yaml = { workspace = true } slog = { workspace = true } types = { workspace = true } validator_dir = { workspace = true } -regex = { workspace = true } -rpassword = "5.0.0" -directory = { workspace = true } +zeroize = { workspace = true } diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index c1fa621abb1..0f576efb3ab 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -8,18 +8,14 @@ use eth2_wallet::{ }; use filesystem::{create_with_600_perms, Error as FsError}; use rand::{distributions::Alphanumeric, Rng}; -use serde::{Deserialize, Serialize}; +use std::fs::{self, File}; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::str::from_utf8; use std::thread::sleep; use std::time::Duration; -use std::{ - fs::{self, File}, - str::FromStr, -}; -use zeroize::Zeroize; +use zeroize::Zeroizing; pub mod validator_definitions; @@ -69,8 +65,8 @@ pub fn read_password>(path: P) -> Result { fs::read(path).map(strip_off_newlines).map(Into::into) } -/// Reads a password file into a `ZeroizeString` struct, with new-lines removed. -pub fn read_password_string>(path: P) -> Result { +/// Reads a password file into a `Zeroizing` struct, with new-lines removed. +pub fn read_password_string>(path: P) -> Result, String> { fs::read(path) .map_err(|e| format!("Error opening file: {:?}", e)) .map(strip_off_newlines) @@ -112,8 +108,8 @@ pub fn random_password() -> PlainText { random_password_raw_string().into_bytes().into() } -/// Generates a random alphanumeric password of length `DEFAULT_PASSWORD_LEN` as `ZeroizeString`. 
-pub fn random_password_string() -> ZeroizeString { +/// Generates a random alphanumeric password of length `DEFAULT_PASSWORD_LEN` as `Zeroizing`. +pub fn random_password_string() -> Zeroizing { random_password_raw_string().into() } @@ -141,7 +137,7 @@ pub fn strip_off_newlines(mut bytes: Vec) -> Vec { } /// Reads a password from TTY or stdin if `use_stdin == true`. -pub fn read_password_from_user(use_stdin: bool) -> Result { +pub fn read_password_from_user(use_stdin: bool) -> Result, String> { let result = if use_stdin { rpassword::prompt_password_stderr("") .map_err(|e| format!("Error reading from stdin: {}", e)) @@ -150,7 +146,7 @@ pub fn read_password_from_user(use_stdin: bool) -> Result .map_err(|e| format!("Error reading from tty: {}", e)) }; - result.map(ZeroizeString::from) + result.map(Zeroizing::from) } /// Reads a mnemonic phrase from TTY or stdin if `use_stdin == true`. @@ -210,46 +206,6 @@ pub fn mnemonic_from_phrase(phrase: &str) -> Result { Mnemonic::from_phrase(phrase, Language::English).map_err(|e| e.to_string()) } -/// Provides a new-type wrapper around `String` that is zeroized on `Drop`. -/// -/// Useful for ensuring that password memory is zeroed-out on drop. -#[derive(Clone, PartialEq, Serialize, Deserialize, Zeroize)] -#[zeroize(drop)] -#[serde(transparent)] -pub struct ZeroizeString(String); - -impl FromStr for ZeroizeString { - type Err = String; - - fn from_str(s: &str) -> Result { - Ok(Self(s.to_owned())) - } -} - -impl From for ZeroizeString { - fn from(s: String) -> Self { - Self(s) - } -} - -impl ZeroizeString { - pub fn as_str(&self) -> &str { - &self.0 - } - - /// Remove any number of newline or carriage returns from the end of a vector of bytes. - pub fn without_newlines(&self) -> ZeroizeString { - let stripped_string = self.0.trim_end_matches(['\r', '\n']).into(); - Self(stripped_string) - } -} - -impl AsRef<[u8]> for ZeroizeString { - fn as_ref(&self) -> &[u8] { - self.0.as_bytes() - } -} - pub fn read_mnemonic_from_cli( mnemonic_path: Option, stdin_inputs: bool, @@ -294,54 +250,6 @@ pub fn read_mnemonic_from_cli( mod test { use super::*; - #[test] - fn test_zeroize_strip_off() { - let expected = "hello world"; - - assert_eq!( - ZeroizeString::from("hello world\n".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world\n\n\n\n".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world\r".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world\r\r\r\r\r".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world\r\n".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world\r\n\r\n".to_string()) - .without_newlines() - .as_str(), - expected - ); - assert_eq!( - ZeroizeString::from("hello world".to_string()) - .without_newlines() - .as_str(), - expected - ); - } - #[test] fn test_strip_off() { let expected = b"hello world".to_vec(); diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index f228ce5fdfa..a4850fc1c63 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -3,9 +3,7 @@ //! Serves as the source-of-truth of which validators this validator client should attempt (or not //! 
attempt) to load into the `crate::intialized_validators::InitializedValidators` struct. -use crate::{ - default_keystore_password_path, read_password_string, write_file_via_temporary, ZeroizeString, -}; +use crate::{default_keystore_password_path, read_password_string, write_file_via_temporary}; use directory::ensure_dir_exists; use eth2_keystore::Keystore; use regex::Regex; @@ -17,6 +15,7 @@ use std::io; use std::path::{Path, PathBuf}; use types::{graffiti::GraffitiString, Address, PublicKey}; use validator_dir::VOTING_KEYSTORE_FILE; +use zeroize::Zeroizing; /// The file name for the serialized `ValidatorDefinitions` struct. pub const CONFIG_FILENAME: &str = "validator_definitions.yml"; @@ -52,7 +51,7 @@ pub enum Error { /// Defines how a password for a validator keystore will be persisted. pub enum PasswordStorage { /// Store the password in the `validator_definitions.yml` file. - ValidatorDefinitions(ZeroizeString), + ValidatorDefinitions(Zeroizing), /// Store the password in a separate, dedicated file (likely in the "secrets" directory). File(PathBuf), /// Don't store the password at all. @@ -93,7 +92,7 @@ pub enum SigningDefinition { #[serde(skip_serializing_if = "Option::is_none")] voting_keystore_password_path: Option, #[serde(skip_serializing_if = "Option::is_none")] - voting_keystore_password: Option, + voting_keystore_password: Option>, }, /// A validator that defers to a Web3Signer HTTP server for signing. /// @@ -107,7 +106,7 @@ impl SigningDefinition { matches!(self, SigningDefinition::LocalKeystore { .. }) } - pub fn voting_keystore_password(&self) -> Result, Error> { + pub fn voting_keystore_password(&self) -> Result>, Error> { match self { SigningDefinition::LocalKeystore { voting_keystore_password: Some(password), diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 73823ae24e9..f3c166bda9e 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -3,16 +3,15 @@ name = "clap_utils" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] alloy-primitives = { workspace = true } clap = { workspace = true } -hex = { workspace = true } dirs = { workspace = true } eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } +hex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } diff --git a/common/compare_fields_derive/Cargo.toml b/common/compare_fields_derive/Cargo.toml index b4bbbaa4369..19682bf3673 100644 --- a/common/compare_fields_derive/Cargo.toml +++ b/common/compare_fields_derive/Cargo.toml @@ -8,5 +8,5 @@ edition = { workspace = true } proc-macro = true [dependencies] -syn = { workspace = true } quote = { workspace = true } +syn = { workspace = true } diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index a03ac2178f8..953fde1af72 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -7,13 +7,13 @@ edition = { workspace = true } build = "build.rs" [build-dependencies] +hex = { workspace = true } reqwest = { workspace = true } serde_json = { workspace = true } sha2 = { workspace = true } -hex = { workspace = true } [dependencies] -types = { workspace = true } +ethabi = "16.0.0" ethereum_ssz = { workspace = true } tree_hash = { workspace = true } -ethabi = "16.0.0" +types = { workspace = true } diff --git a/common/directory/Cargo.toml 
b/common/directory/Cargo.toml index f7243372618..9c3ced90977 100644 --- a/common/directory/Cargo.toml +++ b/common/directory/Cargo.toml @@ -3,7 +3,6 @@ name = "directory" version = "0.1.0" authors = ["pawan "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index d23a4068f1b..9d6dea100d4 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -3,33 +3,30 @@ name = "eth2" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -serde = { workspace = true } -serde_json = { workspace = true } -ssz_types = { workspace = true } -types = { workspace = true } -reqwest = { workspace = true } -lighthouse_network = { workspace = true } -proto_array = { workspace = true } -ethereum_serde_utils = { workspace = true } +derivative = { workspace = true } eth2_keystore = { workspace = true } -libsecp256k1 = { workspace = true } -ring = { workspace = true } -bytes = { workspace = true } -account_utils = { workspace = true } -sensitive_url = { workspace = true } +ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -futures-util = "0.3.8" futures = { workspace = true } -store = { workspace = true } -slashing_protection = { workspace = true } +futures-util = "0.3.8" +lighthouse_network = { workspace = true } mediatype = "0.19.13" pretty_reqwest_error = { workspace = true } -derivative = { workspace = true } +proto_array = { workspace = true } +reqwest = { workspace = true } +reqwest-eventsource = "0.5.0" +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +slashing_protection = { workspace = true } +ssz_types = { workspace = true } +store = { workspace = true } +types = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] tokio = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 522c6414eae..12b1538984e 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -27,6 +27,7 @@ use reqwest::{ Body, IntoUrl, RequestBuilder, Response, }; pub use reqwest::{StatusCode, Url}; +use reqwest_eventsource::{Event, EventSource}; pub use sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{de::DeserializeOwned, Serialize}; use ssz::Encode; @@ -52,6 +53,8 @@ pub const SSZ_CONTENT_TYPE_HEADER: &str = "application/octet-stream"; pub enum Error { /// The `reqwest` client raised an error. HttpClient(PrettyReqwestError), + /// The `reqwest_eventsource` client raised an error. + SseClient(reqwest_eventsource::Error), /// The server returned an error message where the body was able to be parsed. ServerMessage(ErrorMessage), /// The server returned an error message with an array of errors. 
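For orientation, the new `SseClient` variant wraps errors from the `reqwest_eventsource` crate, which the reworked `get_events` below uses in place of hand-parsing SSE byte chunks. A rough standalone sketch of the same consumption pattern (the URL and topic are assumptions, and error handling is simplified):

```rust
use futures::StreamExt;
use reqwest_eventsource::{Event, EventSource};

#[tokio::main]
async fn main() {
    // Subscribe to a hypothetical beacon API event stream.
    let mut es = EventSource::get("http://localhost:5052/eth/v1/events?topics=head");
    while let Some(event) = es.next().await {
        match event {
            // The server acknowledges the subscription before sending any
            // messages; `get_events` awaits this before returning the stream.
            Ok(Event::Open) => println!("stream registered"),
            // Each SSE message carries an `event` tag and a `data` payload,
            // the two strings that `EventKind::from_sse_bytes` now receives.
            Ok(Event::Message(msg)) => println!("{}: {}", msg.event, msg.data),
            Err(err) => {
                eprintln!("sse error: {err}");
                es.close();
            }
        }
    }
}
```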
@@ -93,6 +96,13 @@ impl Error { pub fn status(&self) -> Option { match self { Error::HttpClient(error) => error.inner().status(), + Error::SseClient(error) => { + if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error { + Some(*status) + } else { + None + } + } Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::StatusCode(status) => Some(*status), @@ -2592,16 +2602,29 @@ impl BeaconNodeHttpClient { .join(","); path.query_pairs_mut().append_pair("topics", &topic_string); - Ok(self - .client - .get(path) - .send() - .await? - .bytes_stream() - .map(|next| match next { - Ok(bytes) => EventKind::from_sse_bytes(bytes.as_ref()), - Err(e) => Err(Error::HttpClient(e.into())), - })) + let mut es = EventSource::get(path); + // If we don't await `Event::Open` here, then the consumer + // will not get any Message events until they start awaiting the stream. + // This is a way to register the stream with the sse server before + // message events start getting emitted. + while let Some(event) = es.next().await { + match event { + Ok(Event::Open) => break, + Err(err) => return Err(Error::SseClient(err)), + // This should never happen as we are guaranteed to get the + // Open event before any message starts coming through. + Ok(Event::Message(_)) => continue, + } + } + Ok(Box::pin(es.filter_map(|event| async move { + match event { + Ok(Event::Open) => None, + Ok(Event::Message(message)) => { + Some(EventKind::from_sse_bytes(&message.event, &message.data)) + } + Err(err) => Some(Err(Error::SseClient(err))), + } + }))) } /// `POST validator/duties/sync/{epoch}` diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 309d8228aaf..66dd5d779bd 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -528,9 +528,9 @@ impl BeaconNodeHttpClient { self.post_with_response(path, &()).await } - /// - /// Analysis endpoints. - /// + /* + Analysis endpoints. + */ /// `GET` lighthouse/analysis/block_rewards?start_slot,end_slot pub async fn get_lighthouse_analysis_block_rewards( diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 67fe77a3157..1d1abcac791 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -1,6 +1,5 @@ use super::types::*; use crate::Error; -use account_utils::ZeroizeString; use reqwest::{ header::{HeaderMap, HeaderValue}, IntoUrl, @@ -14,6 +13,7 @@ use std::path::Path; pub use reqwest; pub use reqwest::{Response, StatusCode, Url}; use types::graffiti::GraffitiString; +use zeroize::Zeroizing; /// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a /// Lighthouse Validator Client HTTP server (`validator_client/src/http_api`). @@ -21,7 +21,7 @@ use types::graffiti::GraffitiString; pub struct ValidatorClientHttpClient { client: reqwest::Client, server: SensitiveUrl, - api_token: Option, + api_token: Option>, authorization_header: AuthorizationHeader, } @@ -79,18 +79,18 @@ impl ValidatorClientHttpClient { } /// Get a reference to this client's API token, if any. - pub fn api_token(&self) -> Option<&ZeroizeString> { + pub fn api_token(&self) -> Option<&Zeroizing> { self.api_token.as_ref() } /// Read an API token from the specified `path`, stripping any trailing whitespace. 
- pub fn load_api_token_from_file(path: &Path) -> Result { + pub fn load_api_token_from_file(path: &Path) -> Result, Error> { let token = fs::read_to_string(path).map_err(|e| Error::TokenReadError(path.into(), e))?; - Ok(ZeroizeString::from(token.trim_end().to_string())) + Ok(token.trim_end().to_string().into()) } /// Add an authentication token to use when making requests. - pub fn add_auth_token(&mut self, token: ZeroizeString) -> Result<(), Error> { + pub fn add_auth_token(&mut self, token: Zeroizing) -> Result<(), Error> { self.api_token = Some(token); self.authorization_header = AuthorizationHeader::Bearer; diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index ee05c298399..ae192312bdb 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -1,7 +1,7 @@ -use account_utils::ZeroizeString; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; use types::{Address, Graffiti, PublicKeyBytes}; +use zeroize::Zeroizing; pub use slashing_protection::interchange::Interchange; @@ -41,7 +41,7 @@ pub struct SingleKeystoreResponse { #[serde(deny_unknown_fields)] pub struct ImportKeystoresRequest { pub keystores: Vec, - pub passwords: Vec, + pub passwords: Vec>, pub slashing_protection: Option, } diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 1921549bcb5..d7d5a00df51 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -1,13 +1,12 @@ -use account_utils::ZeroizeString; +pub use crate::lighthouse::Health; +pub use crate::lighthouse_vc::std_types::*; +pub use crate::types::{GenericResponse, VersionData}; use eth2_keystore::Keystore; use graffiti::GraffitiString; use serde::{Deserialize, Serialize}; use std::path::PathBuf; - -pub use crate::lighthouse::Health; -pub use crate::lighthouse_vc::std_types::*; -pub use crate::types::{GenericResponse, VersionData}; pub use types::*; +use zeroize::Zeroizing; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorData { @@ -44,7 +43,7 @@ pub struct ValidatorRequest { #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct CreateValidatorsMnemonicRequest { - pub mnemonic: ZeroizeString, + pub mnemonic: Zeroizing, #[serde(with = "serde_utils::quoted_u32")] pub key_derivation_path_offset: u32, pub validators: Vec, @@ -74,7 +73,7 @@ pub struct CreatedValidator { #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct PostValidatorsResponseData { - pub mnemonic: ZeroizeString, + pub mnemonic: Zeroizing, pub validators: Vec, } @@ -102,7 +101,7 @@ pub struct ValidatorPatchRequest { #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct KeystoreValidatorsPostRequest { - pub password: ZeroizeString, + pub password: Zeroizing, pub enable: bool, pub keystore: Keystore, #[serde(default)] @@ -191,7 +190,7 @@ pub struct SingleExportKeystoresResponse { #[serde(skip_serializing_if = "Option::is_none")] pub validating_keystore: Option, #[serde(skip_serializing_if = "Option::is_none")] - pub validating_keystore_password: Option, + pub validating_keystore_password: Option>, } #[derive(Serialize, Deserialize, Debug)] diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index c187399ebd7..a303953a863 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -13,7 +13,7 @@ use serde_json::Value; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; use std::fmt::{self, Display}; -use 
std::str::{from_utf8, FromStr}; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use types::beacon_block_body::KzgCommitments; @@ -1153,24 +1153,7 @@ impl EventKind { } } - pub fn from_sse_bytes(message: &[u8]) -> Result { - let s = from_utf8(message) - .map_err(|e| ServerError::InvalidServerSentEvent(format!("{:?}", e)))?; - - let mut split = s.split('\n'); - let event = split - .next() - .ok_or_else(|| { - ServerError::InvalidServerSentEvent("Could not parse event tag".to_string()) - })? - .trim_start_matches("event:"); - let data = split - .next() - .ok_or_else(|| { - ServerError::InvalidServerSentEvent("Could not parse data tag".to_string()) - })? - .trim_start_matches("data:"); - + pub fn from_sse_bytes(event: &str, data: &str) -> Result { match event { "attestation" => Ok(EventKind::Attestation(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Attestation: {:?}", e)), diff --git a/common/eth2_config/Cargo.toml b/common/eth2_config/Cargo.toml index 20c3b0b6f26..509f5ff87e2 100644 --- a/common/eth2_config/Cargo.toml +++ b/common/eth2_config/Cargo.toml @@ -5,5 +5,5 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -types = { workspace = true } paste = { workspace = true } +types = { workspace = true } diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index f13e90490e4..50386feb8af 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -120,7 +120,7 @@ pub struct Eth2NetArchiveAndDirectory<'a> { pub genesis_state_source: GenesisStateSource, } -impl<'a> Eth2NetArchiveAndDirectory<'a> { +impl Eth2NetArchiveAndDirectory<'_> { /// The directory that should be used to store files downloaded for this net. pub fn dir(&self) -> PathBuf { env::var("CARGO_MANIFEST_DIR") diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index 5971b934e0c..c19b32014e1 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -3,16 +3,15 @@ name = "eth2_interop_keypairs" version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -num-bigint = "0.4.2" +bls = { workspace = true } ethereum_hashing = { workspace = true } hex = { workspace = true } -serde_yaml = { workspace = true } +num-bigint = "0.4.2" serde = { workspace = true } -bls = { workspace = true } +serde_yaml = { workspace = true } [dev-dependencies] base64 = "0.13.0" diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 09cf2072d2f..a255e042291 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -7,25 +7,25 @@ edition = { workspace = true } build = "build.rs" [build-dependencies] -zip = { workspace = true } eth2_config = { workspace = true } +zip = { workspace = true } [dev-dependencies] +ethereum_ssz = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } -ethereum_ssz = { workspace = true } [dependencies] -serde_yaml = { workspace = true } -types = { workspace = true } -eth2_config = { workspace = true } +bytes = { workspace = true } discv5 = { workspace = true } -reqwest = { workspace = true } +eth2_config = { workspace = true } +kzg = { workspace = true } +logging = { workspace = true } pretty_reqwest_error = { workspace = true } -sha2 = { workspace = true } -url = { workspace = true } 
+reqwest = { workspace = true } sensitive_url = { workspace = true } +serde_yaml = { workspace = true } +sha2 = { workspace = true } slog = { workspace = true } -logging = { workspace = true } -bytes = { workspace = true } -kzg = { workspace = true } +types = { workspace = true } +url = { workspace = true } diff --git a/common/eth2_wallet_manager/Cargo.toml b/common/eth2_wallet_manager/Cargo.toml index f4717570653..a6eb24c78c2 100644 --- a/common/eth2_wallet_manager/Cargo.toml +++ b/common/eth2_wallet_manager/Cargo.toml @@ -3,7 +3,6 @@ name = "eth2_wallet_manager" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/eth2_wallet_manager/src/wallet_manager.rs b/common/eth2_wallet_manager/src/wallet_manager.rs index 3dd419a48b5..c988ca4135e 100644 --- a/common/eth2_wallet_manager/src/wallet_manager.rs +++ b/common/eth2_wallet_manager/src/wallet_manager.rs @@ -296,10 +296,10 @@ mod tests { ) .expect("should create first wallet"); - let uuid = w.wallet().uuid().clone(); + let uuid = *w.wallet().uuid(); assert_eq!( - load_wallet_raw(&base_dir, &uuid).nextaccount(), + load_wallet_raw(base_dir, &uuid).nextaccount(), 0, "should start wallet with nextaccount 0" ); @@ -308,7 +308,7 @@ mod tests { w.next_validator(WALLET_PASSWORD, &[50; 32], &[51; 32]) .expect("should create validator"); assert_eq!( - load_wallet_raw(&base_dir, &uuid).nextaccount(), + load_wallet_raw(base_dir, &uuid).nextaccount(), i, "should update wallet with nextaccount {}", i @@ -333,54 +333,54 @@ mod tests { let base_dir = dir.path(); let mgr = WalletManager::open(base_dir).unwrap(); - let uuid_a = create_wallet(&mgr, 0).wallet().uuid().clone(); - let uuid_b = create_wallet(&mgr, 1).wallet().uuid().clone(); + let uuid_a = *create_wallet(&mgr, 0).wallet().uuid(); + let uuid_b = *create_wallet(&mgr, 1).wallet().uuid(); - let locked_a = LockedWallet::open(&base_dir, &uuid_a).expect("should open wallet a"); + let locked_a = LockedWallet::open(base_dir, &uuid_a).expect("should open wallet a"); assert!( - lockfile_path(&base_dir, &uuid_a).exists(), + lockfile_path(base_dir, &uuid_a).exists(), "lockfile should exist" ); drop(locked_a); assert!( - !lockfile_path(&base_dir, &uuid_a).exists(), + !lockfile_path(base_dir, &uuid_a).exists(), "lockfile have been cleaned up" ); - let locked_a = LockedWallet::open(&base_dir, &uuid_a).expect("should open wallet a"); - let locked_b = LockedWallet::open(&base_dir, &uuid_b).expect("should open wallet b"); + let locked_a = LockedWallet::open(base_dir, &uuid_a).expect("should open wallet a"); + let locked_b = LockedWallet::open(base_dir, &uuid_b).expect("should open wallet b"); assert!( - lockfile_path(&base_dir, &uuid_a).exists(), + lockfile_path(base_dir, &uuid_a).exists(), "lockfile a should exist" ); assert!( - lockfile_path(&base_dir, &uuid_b).exists(), + lockfile_path(base_dir, &uuid_b).exists(), "lockfile b should exist" ); - match LockedWallet::open(&base_dir, &uuid_a) { + match LockedWallet::open(base_dir, &uuid_a) { Err(Error::LockfileError(_)) => {} _ => panic!("did not get locked error"), }; drop(locked_a); - LockedWallet::open(&base_dir, &uuid_a) + LockedWallet::open(base_dir, &uuid_a) .expect("should open wallet a after previous instance is dropped"); - match LockedWallet::open(&base_dir, &uuid_b) { + match LockedWallet::open(base_dir, &uuid_b) { Err(Error::LockfileError(_)) => {} _ => panic!("did not get locked error"), }; 
drop(locked_b); - LockedWallet::open(&base_dir, &uuid_b) + LockedWallet::open(base_dir, &uuid_b) .expect("should open wallet a after previous instance is dropped"); } } diff --git a/common/lighthouse_version/Cargo.toml b/common/lighthouse_version/Cargo.toml index 3c4f9fe50ce..164e3e47a7a 100644 --- a/common/lighthouse_version/Cargo.toml +++ b/common/lighthouse_version/Cargo.toml @@ -3,7 +3,6 @@ name = "lighthouse_version" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index f988dd86b1f..0751bdadff5 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v5.3.0-", - fallback = "Lighthouse/v5.3.0" + prefix = "Lighthouse/v6.0.1-", + fallback = "Lighthouse/v6.0.1" ); /// Returns the first eight characters of the latest commit hash for this build. diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 73cbdf44d42..b2829a48d8f 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -19,7 +19,7 @@ sloggers = { workspace = true } take_mut = "0.2.2" tokio = { workspace = true, features = [ "time" ] } tracing = "0.1" +tracing-appender = { workspace = true } tracing-core = { workspace = true } tracing-log = { workspace = true } tracing-subscriber = { workspace = true } -tracing-appender = { workspace = true } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 4bb37392984..7fe7f79506c 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -105,7 +105,7 @@ impl<'a> AlignedRecordDecorator<'a> { } } -impl<'a> Write for AlignedRecordDecorator<'a> { +impl Write for AlignedRecordDecorator<'_> { fn write(&mut self, buf: &[u8]) -> Result { if buf.iter().any(u8::is_ascii_control) { let filtered = buf @@ -124,7 +124,7 @@ impl<'a> Write for AlignedRecordDecorator<'a> { } } -impl<'a> slog_term::RecordDecorator for AlignedRecordDecorator<'a> { +impl slog_term::RecordDecorator for AlignedRecordDecorator<'_> { fn reset(&mut self) -> Result<()> { self.message_active = false; self.message_count = 0; diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 79a07eed166..64fb7b9aadd 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -5,8 +5,8 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -metrics = { workspace = true } libc = "0.2.79" +metrics = { workspace = true } parking_lot = { workspace = true } tikv-jemalloc-ctl = { version = "0.6.0", optional = true, features = ["stats"] } diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 2da32c307ee..5008c86e858 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -3,19 +3,18 @@ name = "monitoring_api" version = "0.1.0" authors = ["pawan "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -reqwest = { workspace = true } -task_executor = { workspace = true } -tokio = { workspace = true } eth2 = { workspace = true } -serde_json = { workspace = true } -serde = { workspace = true } lighthouse_version = { 
workspace = true } metrics = { workspace = true } -slog = { workspace = true } -store = { workspace = true } regex = { workspace = true } +reqwest = { workspace = true } sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +slog = { workspace = true } +store = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } diff --git a/common/oneshot_broadcast/Cargo.toml b/common/oneshot_broadcast/Cargo.toml index 12c9b40bc85..8a358ef8510 100644 --- a/common/oneshot_broadcast/Cargo.toml +++ b/common/oneshot_broadcast/Cargo.toml @@ -2,7 +2,6 @@ name = "oneshot_broadcast" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/pretty_reqwest_error/Cargo.toml b/common/pretty_reqwest_error/Cargo.toml index dc79832cd3d..4311601bcdd 100644 --- a/common/pretty_reqwest_error/Cargo.toml +++ b/common/pretty_reqwest_error/Cargo.toml @@ -2,7 +2,6 @@ name = "pretty_reqwest_error" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/sensitive_url/Cargo.toml b/common/sensitive_url/Cargo.toml index d218c8d93a1..ff562097225 100644 --- a/common/sensitive_url/Cargo.toml +++ b/common/sensitive_url/Cargo.toml @@ -3,9 +3,8 @@ name = "sensitive_url" version = "0.1.0" authors = ["Mac L "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -url = { workspace = true } serde = { workspace = true } +url = { workspace = true } diff --git a/common/slot_clock/Cargo.toml b/common/slot_clock/Cargo.toml index c2f330cd507..2e1982efb1a 100644 --- a/common/slot_clock/Cargo.toml +++ b/common/slot_clock/Cargo.toml @@ -5,6 +5,6 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -types = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } +types = { workspace = true } diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml index be339f27792..034683f72e0 100644 --- a/common/system_health/Cargo.toml +++ b/common/system_health/Cargo.toml @@ -5,7 +5,7 @@ edition = { workspace = true } [dependencies] lighthouse_network = { workspace = true } -types = { workspace = true } -sysinfo = { workspace = true } -serde = { workspace = true } parking_lot = { workspace = true } +serde = { workspace = true } +sysinfo = { workspace = true } +types = { workspace = true } diff --git a/common/system_health/src/lib.rs b/common/system_health/src/lib.rs index 34311898420..9f351e943bb 100644 --- a/common/system_health/src/lib.rs +++ b/common/system_health/src/lib.rs @@ -235,14 +235,14 @@ pub fn observe_nat() -> NatState { let libp2p_ipv4 = lighthouse_network::metrics::get_int_gauge( &lighthouse_network::metrics::NAT_OPEN, - &["libp2p"], + &["libp2p_ipv4"], ) .map(|g| g.get() == 1) .unwrap_or_default(); let libp2p_ipv6 = lighthouse_network::metrics::get_int_gauge( &lighthouse_network::metrics::NAT_OPEN, - &["libp2p"], + &["libp2p_ipv6"], ) .map(|g| g.get() == 1) .unwrap_or_default(); diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 26bcd7b339c..c1ac4b55a91 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -11,10 +11,10 @@ tracing = ["dep:tracing"] [dependencies] async-channel = { 
workspace = true } -tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } -slog = { workspace = true, optional = true } futures = { workspace = true } +logging = { workspace = true, optional = true } metrics = { workspace = true } +slog = { workspace = true, optional = true } sloggers = { workspace = true, optional = true } -logging = { workspace = true, optional = true } +tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } tracing = { workspace = true, optional = true } diff --git a/common/test_random_derive/Cargo.toml b/common/test_random_derive/Cargo.toml index 79308797a4b..b38d5ef63a5 100644 --- a/common/test_random_derive/Cargo.toml +++ b/common/test_random_derive/Cargo.toml @@ -9,5 +9,5 @@ description = "Procedural derive macros for implementation of TestRandom trait" proc-macro = true [dependencies] -syn = { workspace = true } quote = { workspace = true } +syn = { workspace = true } diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml index 95dbf591861..2d771cd6008 100644 --- a/common/unused_port/Cargo.toml +++ b/common/unused_port/Cargo.toml @@ -2,7 +2,6 @@ name = "unused_port" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index ae8742fe07b..773431c93c6 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -6,21 +6,20 @@ edition = { workspace = true } [features] insecure_keys = [] - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] bls = { workspace = true } +deposit_contract = { workspace = true } +derivative = { workspace = true } +directory = { workspace = true } eth2_keystore = { workspace = true } filesystem = { workspace = true } -types = { workspace = true } -rand = { workspace = true } -deposit_contract = { workspace = true } -tree_hash = { workspace = true } hex = { workspace = true } -derivative = { workspace = true } lockfile = { workspace = true } -directory = { workspace = true } +rand = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/common/validator_dir/src/insecure_keys.rs b/common/validator_dir/src/insecure_keys.rs index f8cc51da63e..83720bb58cd 100644 --- a/common/validator_dir/src/insecure_keys.rs +++ b/common/validator_dir/src/insecure_keys.rs @@ -15,7 +15,7 @@ use types::test_utils::generate_deterministic_keypair; /// A very weak password with which to encrypt the keystores. pub const INSECURE_PASSWORD: &[u8] = &[50; 51]; -impl<'a> Builder<'a> { +impl Builder<'_> { /// Generate the voting keystore using a deterministic, well-known, **unsafe** keypair. /// /// **NEVER** use these keys in production! 
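For context on the builder above: a tiny sketch (hypothetical `main`, reusing the `generate_deterministic_keypair` helper imported in this file) showing that each validator index always yields the same well-known keypair, which is why keystores built from them must never guard real funds:

```rust
use types::test_utils::generate_deterministic_keypair;

fn main() {
    // Indices map to fixed, publicly derivable keypairs: anyone can recompute
    // the secret keys, so they are only suitable for tests and local testnets.
    for index in 0..3 {
        let keypair = generate_deterministic_keypair(index);
        println!("validator {}: {:?}", index, keypair.pk);
    }
}
```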
diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index a9407c392d9..4a3cde54a9a 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -3,20 +3,19 @@ name = "warp_utils" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -warp = { workspace = true } -eth2 = { workspace = true } -types = { workspace = true } beacon_chain = { workspace = true } -state_processing = { workspace = true } +bytes = { workspace = true } +eth2 = { workspace = true } +headers = "0.3.2" +metrics = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } +serde_array_query = "0.1.0" serde_json = { workspace = true } +state_processing = { workspace = true } tokio = { workspace = true } -headers = "0.3.2" -metrics = { workspace = true } -serde_array_query = "0.1.0" -bytes = { workspace = true } +types = { workspace = true } +warp = { workspace = true } diff --git a/consensus/fixed_bytes/Cargo.toml b/consensus/fixed_bytes/Cargo.toml index e5201a04551..ab29adfb1b9 100644 --- a/consensus/fixed_bytes/Cargo.toml +++ b/consensus/fixed_bytes/Cargo.toml @@ -3,7 +3,6 @@ name = "fixed_bytes" version = "0.1.0" authors = ["Eitan Seri-Levi "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index b32e0aa6656..3bd18e922aa 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -3,17 +3,16 @@ name = "fork_choice" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -types = { workspace = true } -state_processing = { workspace = true } -proto_array = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } metrics = { workspace = true } +proto_array = { workspace = true } slog = { workspace = true } +state_processing = { workspace = true } +types = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 29265e34e4d..ef017159a02 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1156,18 +1156,20 @@ async fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { }; // recreate the chain exactly - ForkChoiceTest::new_with_chain_config(chain_config.clone()) - .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) - .await - .unwrap() - .skip_slots(E::slots_per_epoch() as usize) - .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) - .await - .unwrap() - .apply_blocks(1) - .await - .assert_finalized_epoch(5) - .assert_shutdown_signal_not_sent(); + Box::pin( + ForkChoiceTest::new_with_chain_config(chain_config.clone()) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await + .unwrap() + .skip_slots(E::slots_per_epoch() as usize) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await + .unwrap() + .apply_blocks(1), + ) + .await + .assert_finalized_epoch(5) + .assert_shutdown_signal_not_sent(); } #[tokio::test] diff --git a/consensus/int_to_bytes/Cargo.toml b/consensus/int_to_bytes/Cargo.toml index 
e99d1af8e56..c639dfce8d6 100644 --- a/consensus/int_to_bytes/Cargo.toml +++ b/consensus/int_to_bytes/Cargo.toml @@ -8,5 +8,5 @@ edition = { workspace = true } bytes = { workspace = true } [dev-dependencies] -yaml-rust2 = "0.8" hex = { workspace = true } +yaml-rust2 = "0.8" diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 99f98cf545f..bd6757c0fad 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -9,10 +9,10 @@ name = "proto_array" path = "src/bin.rs" [dependencies] -types = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +safe_arith = { workspace = true } serde = { workspace = true } serde_yaml = { workspace = true } -safe_arith = { workspace = true } superstruct = { workspace = true } +types = { workspace = true } diff --git a/consensus/safe_arith/Cargo.toml b/consensus/safe_arith/Cargo.toml index 6f2e4b811c7..9ac9fe28d3a 100644 --- a/consensus/safe_arith/Cargo.toml +++ b/consensus/safe_arith/Cargo.toml @@ -3,7 +3,6 @@ name = "safe_arith" version = "0.1.0" authors = ["Michael Sproul "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index b7f6ef7b2a9..502ffe3cf65 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -5,30 +5,30 @@ authors = ["Paul Hauner ", "Michael Sproul BlockReplayer<'a, E, Error, StateRootIterDefault> +impl BlockReplayer<'_, E, Error, StateRootIterDefault> where E: EthSpec, Error: From, diff --git a/consensus/state_processing/src/common/get_attesting_indices.rs b/consensus/state_processing/src/common/get_attesting_indices.rs index b131f7679a3..842adce431c 100644 --- a/consensus/state_processing/src/common/get_attesting_indices.rs +++ b/consensus/state_processing/src/common/get_attesting_indices.rs @@ -103,14 +103,14 @@ pub mod attesting_indices_electra { let committee_count_per_slot = committees.len() as u64; let mut participant_count = 0; - for index in committee_indices { + for committee_index in committee_indices { let beacon_committee = committees - .get(index as usize) - .ok_or(Error::NoCommitteeFound(index))?; + .get(committee_index as usize) + .ok_or(Error::NoCommitteeFound(committee_index))?; // This check is new to the spec's `process_attestation` in Electra. - if index >= committee_count_per_slot { - return Err(BeaconStateError::InvalidCommitteeIndex(index)); + if committee_index >= committee_count_per_slot { + return Err(BeaconStateError::InvalidCommitteeIndex(committee_index)); } participant_count.safe_add_assign(beacon_committee.committee.len() as u64)?; let committee_attesters = beacon_committee @@ -127,6 +127,12 @@ pub mod attesting_indices_electra { }) .collect::>(); + // Require at least a single non-zero bit for each attesting committee bitfield. + // This check is new to the spec's `process_attestation` in Electra. 
+ if committee_attesters.is_empty() { + return Err(BeaconStateError::EmptyCommittee); + } + attesting_indices.extend(committee_attesters); committee_offset.safe_add_assign(beacon_committee.committee.len())?; } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index f289b6e0817..436f4934b90 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -514,6 +514,7 @@ pub fn get_expected_withdrawals( // Consume pending partial withdrawals let partial_withdrawals_count = if let Ok(partial_withdrawals) = state.pending_partial_withdrawals() { + let mut partial_withdrawals_count = 0; for withdrawal in partial_withdrawals { if withdrawal.withdrawable_epoch > epoch || withdrawals.len() == spec.max_pending_partials_per_withdrawals_sweep as usize @@ -546,8 +547,9 @@ pub fn get_expected_withdrawals( }); withdrawal_index.safe_add_assign(1)?; } + partial_withdrawals_count.safe_add_assign(1)?; } - Some(withdrawals.len()) + Some(partial_withdrawals_count) } else { None }; diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index a53dc15126f..22d8592364c 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -7,7 +7,6 @@ use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; use crate::VerifySignatures; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; use types::typenum::U33; -use types::validator::is_compounding_withdrawal_credential; pub fn process_operations>( state: &mut BeaconState, @@ -378,7 +377,7 @@ pub fn process_deposits( if state.eth1_deposit_index() < eth1_deposit_index_limit { let expected_deposit_len = std::cmp::min( E::MaxDeposits::to_u64(), - state.get_outstanding_deposit_len()?, + eth1_deposit_index_limit.safe_sub(state.eth1_deposit_index())?, ); block_verify!( deposits.len() as u64 == expected_deposit_len, @@ -450,39 +449,46 @@ pub fn apply_deposit( if let Some(index) = validator_index { // [Modified in Electra:EIP7251] - if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { - pending_balance_deposits.push(PendingBalanceDeposit { index, amount })?; - - let validator = state - .validators() - .get(index as usize) - .ok_or(BeaconStateError::UnknownValidator(index as usize))?; - - if is_compounding_withdrawal_credential(deposit_data.withdrawal_credentials, spec) - && validator.has_eth1_withdrawal_credential(spec) - && is_valid_deposit_signature(&deposit_data, spec).is_ok() - { - state.switch_to_compounding_validator(index as usize, spec)?; - } + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, + amount, + signature: deposit_data.signature, + slot: spec.genesis_slot, // Use `genesis_slot` to distinguish from a pending deposit request + })?; } else { // Update the existing validator balance. increase_balance(state, index as usize, amount)?; } - } else { + } + // New validator + else { // The signature should be checked for new validators. Return early for a bad // signature. 
if is_valid_deposit_signature(&deposit_data, spec).is_err() { return Ok(()); } - state.add_validator_to_registry(&deposit_data, spec)?; - let new_validator_index = state.validators().len().safe_sub(1)? as u64; + state.add_validator_to_registry( + deposit_data.pubkey, + deposit_data.withdrawal_credentials, + if state.fork_name_unchecked() >= ForkName::Electra { + 0 + } else { + amount + }, + spec, + )?; // [New in Electra:EIP7251] - if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { - pending_balance_deposits.push(PendingBalanceDeposit { - index: new_validator_index, + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, amount, + signature: deposit_data.signature, + slot: spec.genesis_slot, // Use `genesis_slot` to distinguish from a pending deposit request })?; } } @@ -596,13 +602,18 @@ pub fn process_deposit_requests( if state.deposit_requests_start_index()? == spec.unset_deposit_requests_start_index { *state.deposit_requests_start_index_mut()? = request.index } - let deposit_data = DepositData { - pubkey: request.pubkey, - withdrawal_credentials: request.withdrawal_credentials, - amount: request.amount, - signature: request.signature.clone().into(), - }; - apply_deposit(state, deposit_data, None, false, spec)? + let slot = state.slot(); + + // [New in Electra:EIP7251] + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: request.pubkey, + withdrawal_credentials: request.withdrawal_credentials, + amount: request.amount, + signature: request.signature.clone(), + slot, + })?; + } } Ok(()) @@ -621,11 +632,84 @@ pub fn process_consolidation_requests( Ok(()) } +fn is_valid_switch_to_compounding_request( + state: &BeaconState, + consolidation_request: &ConsolidationRequest, + spec: &ChainSpec, +) -> Result { + // Switch to compounding requires source and target be equal + if consolidation_request.source_pubkey != consolidation_request.target_pubkey { + return Ok(false); + } + + // Verify pubkey exists + let Some(source_index) = state + .pubkey_cache() + .get(&consolidation_request.source_pubkey) + else { + // source validator doesn't exist + return Ok(false); + }; + + let source_validator = state.get_validator(source_index)?; + // Verify the source withdrawal credentials + // Note: We need to specifically check for eth1 withdrawal credentials here + // If the validator is already compounding, the compounding request is not valid. + if let Some(withdrawal_address) = source_validator + .has_eth1_withdrawal_credential(spec) + .then(|| { + source_validator + .withdrawal_credentials + .as_slice() + .get(12..) 
+ .map(Address::from_slice) + }) + .flatten() + { + if withdrawal_address != consolidation_request.source_address { + return Ok(false); + } + } else { + // Source doesn't have eth1 withdrawal credentials + return Ok(false); + } + + // Verify the source is active + let current_epoch = state.current_epoch(); + if !source_validator.is_active_at(current_epoch) { + return Ok(false); + } + // Verify an exit for the source has not been initiated + if source_validator.exit_epoch != spec.far_future_epoch { + return Ok(false); + } + + Ok(true) +} + pub fn process_consolidation_request( state: &mut BeaconState, consolidation_request: &ConsolidationRequest, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + if is_valid_switch_to_compounding_request(state, consolidation_request, spec)? { + let Some(source_index) = state + .pubkey_cache() + .get(&consolidation_request.source_pubkey) + else { + // source validator doesn't exist. This is unreachable as `is_valid_switch_to_compounding_request` + // will return false in that case. + return Ok(()); + }; + state.switch_to_compounding_validator(source_index, spec)?; + return Ok(()); + } + + // Verify that source != target, so a consolidation cannot be used as an exit. + if consolidation_request.source_pubkey == consolidation_request.target_pubkey { + return Ok(()); + } + // If the pending consolidations queue is full, consolidation requests are ignored if state.pending_consolidations()?.len() == E::PendingConsolidationsLimit::to_usize() { return Ok(()); @@ -649,10 +733,6 @@ pub fn process_consolidation_request( // target validator doesn't exist return Ok(()); }; - // Verify that source != target, so a consolidation cannot be used as an exit. - if source_index == target_index { - return Ok(()); - } let source_validator = state.get_validator(source_index)?; // Verify the source withdrawal credentials @@ -699,5 +779,10 @@ pub fn process_consolidation_request( target_index: target_index as u64, })?; + let target_validator = state.get_validator(target_index)?; + // Churn any target excess active balance of target and raise its max + if target_validator.has_eth1_withdrawal_credential(spec) { + state.switch_to_compounding_validator(target_index, spec)?; + } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index b6c9dbea521..f45c55a7acf 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -28,6 +28,7 @@ pub enum EpochProcessingError { SinglePassMissingActivationQueue, MissingEarliestExitEpoch, MissingExitBalanceToConsume, + PendingDepositsLogicError, } impl From for EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index fcb480a37cf..904e68e3686 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -4,6 +4,7 @@ use crate::{ update_progressive_balances_cache::initialize_progressive_balances_cache, }, epoch_cache::{initialize_epoch_cache, PreEpochCache}, + per_block_processing::is_valid_deposit_signature, per_epoch_processing::{Delta, Error, ParticipationEpochSummary}, }; use itertools::izip; @@ -16,9 +17,9 @@ use types::{ TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, milhouse::Cow, - ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint,
Epoch, EthSpec, - ExitCache, ForkName, List, ParticipationFlags, PendingBalanceDeposit, ProgressiveBalancesCache, - RelativeEpoch, Unsigned, Validator, + ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, DepositData, Epoch, + EthSpec, ExitCache, ForkName, List, ParticipationFlags, PendingDeposit, + ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, }; pub struct SinglePassConfig { @@ -26,7 +27,7 @@ pub struct SinglePassConfig { pub rewards_and_penalties: bool, pub registry_updates: bool, pub slashings: bool, - pub pending_balance_deposits: bool, + pub pending_deposits: bool, pub pending_consolidations: bool, pub effective_balance_updates: bool, } @@ -44,7 +45,7 @@ impl SinglePassConfig { rewards_and_penalties: true, registry_updates: true, slashings: true, - pending_balance_deposits: true, + pending_deposits: true, pending_consolidations: true, effective_balance_updates: true, } @@ -56,7 +57,7 @@ impl SinglePassConfig { rewards_and_penalties: false, registry_updates: false, slashings: false, - pending_balance_deposits: false, + pending_deposits: false, pending_consolidations: false, effective_balance_updates: false, } @@ -85,15 +86,17 @@ struct SlashingsContext { penalty_per_effective_balance_increment: u64, } -struct PendingBalanceDepositsContext { +struct PendingDepositsContext { /// The value to set `next_deposit_index` to *after* processing completes. next_deposit_index: usize, /// The value to set `deposit_balance_to_consume` to *after* processing completes. deposit_balance_to_consume: u64, /// Total balance increases for each validator due to pending balance deposits. validator_deposits_to_process: HashMap, - /// The deposits to append to `pending_balance_deposits` after processing all applicable deposits. - deposits_to_postpone: Vec, + /// The deposits to append to `pending_deposits` after processing all applicable deposits. + deposits_to_postpone: Vec, + /// New validators to be added to the state *after* processing completes. + new_validator_deposits: Vec, } struct EffectiveBalancesContext { @@ -138,6 +141,7 @@ pub fn process_epoch_single_pass( state.build_exit_cache(spec)?; state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.update_pubkey_cache()?; let previous_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); @@ -163,12 +167,11 @@ pub fn process_epoch_single_pass( let slashings_ctxt = &SlashingsContext::new(state, state_ctxt, spec)?; let mut next_epoch_cache = PreEpochCache::new_for_next_epoch(state)?; - let pending_balance_deposits_ctxt = - if fork_name.electra_enabled() && conf.pending_balance_deposits { - Some(PendingBalanceDepositsContext::new(state, spec)?) - } else { - None - }; + let pending_deposits_ctxt = if fork_name.electra_enabled() && conf.pending_deposits { + Some(PendingDepositsContext::new(state, spec, &conf)?) 
+ } else { + None + }; let mut earliest_exit_epoch = state.earliest_exit_epoch().ok(); let mut exit_balance_to_consume = state.exit_balance_to_consume().ok(); @@ -303,9 +306,9 @@ pub fn process_epoch_single_pass( process_single_slashing(&mut balance, &validator, slashings_ctxt, state_ctxt, spec)?; } - // `process_pending_balance_deposits` - if let Some(pending_balance_deposits_ctxt) = &pending_balance_deposits_ctxt { - process_pending_balance_deposits_for_validator( + // `process_pending_deposits` + if let Some(pending_balance_deposits_ctxt) = &pending_deposits_ctxt { + process_pending_deposits_for_validator( &mut balance, validator_info, pending_balance_deposits_ctxt, @@ -342,20 +345,84 @@ pub fn process_epoch_single_pass( // Finish processing pending balance deposits if relevant. // // This *could* be reordered after `process_pending_consolidations` which pushes only to the end - // of the `pending_balance_deposits` list. But we may as well preserve the write ordering used + // of the `pending_deposits` list. But we may as well preserve the write ordering used // by the spec and do this first. - if let Some(ctxt) = pending_balance_deposits_ctxt { - let mut new_pending_balance_deposits = List::try_from_iter( + if let Some(ctxt) = pending_deposits_ctxt { + let mut new_balance_deposits = List::try_from_iter( state - .pending_balance_deposits()? + .pending_deposits()? .iter_from(ctxt.next_deposit_index)? .cloned(), )?; for deposit in ctxt.deposits_to_postpone { - new_pending_balance_deposits.push(deposit)?; + new_balance_deposits.push(deposit)?; } - *state.pending_balance_deposits_mut()? = new_pending_balance_deposits; + *state.pending_deposits_mut()? = new_balance_deposits; *state.deposit_balance_to_consume_mut()? = ctxt.deposit_balance_to_consume; + + // `new_validator_deposits` may contain multiple deposits with the same pubkey where + // the first deposit creates the new validator and the others are topups. + // Each item in the vec is a (pubkey, validator_index) + let mut added_validators = Vec::new(); + for deposit in ctxt.new_validator_deposits { + let deposit_data = DepositData { + pubkey: deposit.pubkey, + withdrawal_credentials: deposit.withdrawal_credentials, + amount: deposit.amount, + signature: deposit.signature, + }; + // Only check the signature if this is the first deposit for the validator, + // following the logic from `apply_pending_deposit` in the spec. + if let Some(validator_index) = state.get_validator_index(&deposit_data.pubkey)? { + state + .get_balance_mut(validator_index)? + .safe_add_assign(deposit_data.amount)?; + } else if is_valid_deposit_signature(&deposit_data, spec).is_ok() { + // Apply the new deposit to the state + let validator_index = state.add_validator_to_registry( + deposit_data.pubkey, + deposit_data.withdrawal_credentials, + deposit_data.amount, + spec, + )?; + added_validators.push((deposit_data.pubkey, validator_index)); + } + } + if conf.effective_balance_updates { + // Re-process effective balance updates for validators affected by top-up of new validators. 
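+ // These validators were appended to the registry after the main single-pass loop ran,
+ // so the next-epoch cache and progressive balances have not seen them yet; run the
+ // per-validator effective balance update for them here.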
+ let (
+ validators,
+ balances,
+ _,
+ current_epoch_participation,
+ _,
+ progressive_balances,
+ _,
+ _,
+ ) = state.mutable_validator_fields()?;
+ for (_, validator_index) in added_validators.iter() {
+ let balance = *balances
+ .get(*validator_index)
+ .ok_or(BeaconStateError::UnknownValidator(*validator_index))?;
+ let mut validator = validators
+ .get_cow(*validator_index)
+ .ok_or(BeaconStateError::UnknownValidator(*validator_index))?;
+ let validator_current_epoch_participation = *current_epoch_participation
+ .get(*validator_index)
+ .ok_or(BeaconStateError::UnknownValidator(*validator_index))?;
+ process_single_effective_balance_update(
+ *validator_index,
+ balance,
+ &mut validator,
+ validator_current_epoch_participation,
+ &mut next_epoch_cache,
+ progressive_balances,
+ effective_balances_ctxt,
+ state_ctxt,
+ spec,
+ )?;
+ }
+ }
 }
 // Process consolidations outside the single-pass loop, as they depend on balances for multiple
@@ -819,8 +886,12 @@ fn process_single_slashing(
 Ok(())
 }
-impl PendingBalanceDepositsContext {
- fn new(state: &BeaconState, spec: &ChainSpec) -> Result {
+impl PendingDepositsContext {
+ fn new(
+ state: &BeaconState,
+ spec: &ChainSpec,
+ config: &SinglePassConfig,
+ ) -> Result {
 let available_for_processing = state
 .deposit_balance_to_consume()?
 .safe_add(state.get_activation_exit_churn_limit(spec)?)?;
@@ -830,10 +901,31 @@ impl PendingBalanceDepositsContext {
 let mut next_deposit_index = 0;
 let mut validator_deposits_to_process = HashMap::new();
 let mut deposits_to_postpone = vec![];
-
- let pending_balance_deposits = state.pending_balance_deposits()?;
-
- for deposit in pending_balance_deposits.iter() {
+ let mut new_validator_deposits = vec![];
+ let mut is_churn_limit_reached = false;
+ let finalized_slot = state
+ .finalized_checkpoint()
+ .epoch
+ .start_slot(E::slots_per_epoch());
+
+ let pending_deposits = state.pending_deposits()?;
+
+ for deposit in pending_deposits.iter() {
+ // Do not process deposit requests if the Eth1 bridge deposits are not yet applied.
+ if deposit.slot > spec.genesis_slot
+ && state.eth1_deposit_index() < state.deposit_requests_start_index()?
+ {
+ break;
+ }
+ // Do not process if deposit slot has not been finalized.
+ if deposit.slot > finalized_slot {
+ break;
+ }
+ // Do not process if we have reached the limit for the number of deposits
+ // processed in an epoch.
+ if next_deposit_index >= E::max_pending_deposits_per_epoch() {
+ break;
+ }
 // We have to do a bit of indexing into `validators` here, but I can't see any way
 // around that without changing the spec.
 //
 // take, just whether it is non-default. Nor do we need to know the value of
 // `withdrawable_epoch`, because `next_epoch <= withdrawable_epoch` will evaluate to
 // `true` both for the actual value & the default placeholder value (`FAR_FUTURE_EPOCH`).
- let validator = state.get_validator(deposit.index as usize)?;
- let already_exited = validator.exit_epoch < spec.far_future_epoch;
- // In the spec process_registry_updates is called before process_pending_balance_deposits
- // so we must account for process_registry_updates ejecting the validator for low balance
- // and setting the exit_epoch to < far_future_epoch. Note that in the spec the effective
- // balance update does not happen until *after* the registry update, so we don't need to
- // account for changes to the effective balance that would push it below the ejection
- // balance here.
- let will_be_exited = validator.is_active_at(current_epoch)
- && validator.effective_balance <= spec.ejection_balance;
- if already_exited || will_be_exited {
- if next_epoch <= validator.withdrawable_epoch {
- deposits_to_postpone.push(deposit.clone());
- } else {
- // Deposited balance will never become active. Increase balance but do not
- // consume churn.
- validator_deposits_to_process
- .entry(deposit.index as usize)
- .or_insert(0)
- .safe_add_assign(deposit.amount)?;
- }
- } else {
- // Deposit does not fit in the churn, no more deposit processing in this epoch.
- if processed_amount.safe_add(deposit.amount)? > available_for_processing {
- break;
- }
- // Deposit fits in the churn, process it. Increase balance and consume churn.
+ let mut is_validator_exited = false;
+ let mut is_validator_withdrawn = false;
+ let opt_validator_index = state.pubkey_cache().get(&deposit.pubkey);
+ if let Some(validator_index) = opt_validator_index {
+ let validator = state.get_validator(validator_index)?;
+ let already_exited = validator.exit_epoch < spec.far_future_epoch;
+ // In the spec, process_registry_updates is called before process_pending_deposits,
+ // so we must account for process_registry_updates ejecting the validator for low balance
+ // and setting the exit_epoch to < far_future_epoch. Note that in the spec the effective
+ // balance update does not happen until *after* the registry update, so we don't need to
+ // account for changes to the effective balance that would push it below the ejection
+ // balance here.
+ // Note: we only consider this if registry_updates are enabled in the config.
+ // EF tests require us to run epoch_processing functions in isolation.
+ let will_be_exited = config.registry_updates
+ && (validator.is_active_at(current_epoch)
+ && validator.effective_balance <= spec.ejection_balance);
+ is_validator_exited = already_exited || will_be_exited;
+ is_validator_withdrawn = validator.withdrawable_epoch < next_epoch;
+ }
+
+ if is_validator_withdrawn {
+ // Deposited balance will never become active. Queue a balance increase but do not
+ // consume churn. Validator index must be known if the validator is known to be
+ // withdrawn (see calculation of `is_validator_withdrawn` above).
+ let validator_index =
+ opt_validator_index.ok_or(Error::PendingDepositsLogicError)?;
 validator_deposits_to_process
- .entry(deposit.index as usize)
+ .entry(validator_index)
 .or_insert(0)
 .safe_add_assign(deposit.amount)?;
+ } else if is_validator_exited {
+ // Validator is exiting; postpone the deposit until after its withdrawable epoch.
+ deposits_to_postpone.push(deposit.clone());
+ } else {
+ // Check if the deposit fits in the churn; otherwise, do no more deposit processing in this epoch.
+ is_churn_limit_reached =
+ processed_amount.safe_add(deposit.amount)? > available_for_processing;
+ if is_churn_limit_reached {
+ break;
+ }
 processed_amount.safe_add_assign(deposit.amount)?;
+
+ // Deposit fits in the churn, process it. Increase balance and consume churn.
+ if let Some(validator_index) = state.pubkey_cache().get(&deposit.pubkey) {
+ validator_deposits_to_process
+ .entry(validator_index)
+ .or_insert(0)
+ .safe_add_assign(deposit.amount)?;
+ } else {
+ // The `PendingDeposit` is for a new validator.
+ new_validator_deposits.push(deposit.clone());
+ }
 }
 // Regardless of how the deposit was handled, we move on in the queue.
next_deposit_index.safe_add_assign(1)?; } - let deposit_balance_to_consume = if next_deposit_index == pending_balance_deposits.len() { - 0 - } else { + // Accumulate churn only if the churn limit has been hit. + let deposit_balance_to_consume = if is_churn_limit_reached { available_for_processing.safe_sub(processed_amount)? + } else { + 0 }; Ok(Self { @@ -893,14 +1007,15 @@ impl PendingBalanceDepositsContext { deposit_balance_to_consume, validator_deposits_to_process, deposits_to_postpone, + new_validator_deposits, }) } } -fn process_pending_balance_deposits_for_validator( +fn process_pending_deposits_for_validator( balance: &mut Cow, validator_info: &ValidatorInfo, - pending_balance_deposits_ctxt: &PendingBalanceDepositsContext, + pending_balance_deposits_ctxt: &PendingDepositsContext, ) -> Result<(), Error> { if let Some(deposit_amount) = pending_balance_deposits_ctxt .validator_deposits_to_process @@ -941,21 +1056,20 @@ fn process_pending_consolidations( break; } - // Calculate the active balance while we have the source validator loaded. This is a safe - // reordering. - let source_balance = *state - .balances() - .get(source_index) - .ok_or(BeaconStateError::UnknownValidator(source_index))?; - let active_balance = - source_validator.get_active_balance(source_balance, spec, state_ctxt.fork_name); - - // Churn any target excess active balance of target and raise its max. - state.switch_to_compounding_validator(target_index, spec)?; + // Calculate the consolidated balance + let max_effective_balance = + source_validator.get_max_effective_balance(spec, state_ctxt.fork_name); + let source_effective_balance = std::cmp::min( + *state + .balances() + .get(source_index) + .ok_or(BeaconStateError::UnknownValidator(source_index))?, + max_effective_balance, + ); // Move active balance to target. Excess balance is withdrawable. - decrease_balance(state, source_index, active_balance)?; - increase_balance(state, target_index, active_balance)?; + decrease_balance(state, source_index, source_effective_balance)?; + increase_balance(state, target_index, source_effective_balance)?; affected_validators.insert(source_index); affected_validators.insert(target_index); diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index 1e532d9f107..1e64ef28978 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -1,8 +1,10 @@ +use bls::Signature; +use itertools::Itertools; use safe_arith::SafeArith; use std::mem; use types::{ BeaconState, BeaconStateElectra, BeaconStateError as Error, ChainSpec, Epoch, EpochCache, - EthSpec, Fork, + EthSpec, Fork, PendingDeposit, }; /// Transform a `Deneb` state into an `Electra` state. 
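Stepping back to the pending-deposit changes a few hunks up: leftover churn allowance is now banked in `deposit_balance_to_consume` only when a deposit actually tripped the limit, instead of whenever the queue was left non-empty. A standalone sketch of that rule, with illustrative names and made-up Gwei amounts rather than Lighthouse APIs:

```rust
/// Apply queued deposit amounts in order until one would exceed the allowance.
/// Returns how many deposits were consumed and the allowance carried over.
fn process_queue(deposits: &[u64], available_for_processing: u64) -> (usize, u64) {
    let mut processed_amount = 0u64;
    let mut next_deposit_index = 0usize;
    let mut is_churn_limit_reached = false;
    for &amount in deposits {
        if processed_amount + amount > available_for_processing {
            is_churn_limit_reached = true;
            break;
        }
        processed_amount += amount;
        next_deposit_index += 1;
    }
    // Carry over unused allowance only if the limit was actually hit.
    let deposit_balance_to_consume = if is_churn_limit_reached {
        available_for_processing - processed_amount
    } else {
        0
    };
    (next_deposit_index, deposit_balance_to_consume)
}

fn main() {
    // 64 ETH of churn allowance against 32, 24 and 32 ETH deposits (in Gwei):
    // the first two fit (56 ETH), the third trips the limit, and 8 ETH carries over.
    let deposits = [32_000_000_000, 24_000_000_000, 32_000_000_000];
    assert_eq!(process_queue(&deposits, 64_000_000_000), (2, 8_000_000_000));
}
```

The real loop additionally skips exited or withdrawn validators without consuming churn, as shown in the hunks above; the sketch covers only the limit accounting.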
@@ -38,29 +40,44 @@ pub fn upgrade_to_electra( // Add validators that are not yet active to pending balance deposits let validators = post.validators().clone(); - let mut pre_activation = validators + let pre_activation = validators .iter() .enumerate() .filter(|(_, validator)| validator.activation_epoch == spec.far_future_epoch) + .sorted_by_key(|(index, validator)| (validator.activation_eligibility_epoch, *index)) .collect::>(); - // Sort the indices by activation_eligibility_epoch and then by index - pre_activation.sort_by(|(index_a, val_a), (index_b, val_b)| { - if val_a.activation_eligibility_epoch == val_b.activation_eligibility_epoch { - index_a.cmp(index_b) - } else { - val_a - .activation_eligibility_epoch - .cmp(&val_b.activation_eligibility_epoch) - } - }); - // Process validators to queue entire balance and reset them for (index, _) in pre_activation { - post.queue_entire_balance_and_reset_validator(index, spec)?; + let balance = post + .balances_mut() + .get_mut(index) + .ok_or(Error::UnknownValidator(index))?; + let balance_copy = *balance; + *balance = 0_u64; + + let validator = post + .validators_mut() + .get_mut(index) + .ok_or(Error::UnknownValidator(index))?; + validator.effective_balance = 0; + validator.activation_eligibility_epoch = spec.far_future_epoch; + let pubkey = validator.pubkey; + let withdrawal_credentials = validator.withdrawal_credentials; + + post.pending_deposits_mut()? + .push(PendingDeposit { + pubkey, + withdrawal_credentials, + amount: balance_copy, + signature: Signature::infinity()?.into(), + slot: spec.genesis_slot, + }) + .map_err(Error::MilhouseError)?; } // Ensure early adopters of compounding credentials go through the activation churn + let validators = post.validators().clone(); for (index, validator) in validators.iter().enumerate() { if validator.has_compounding_withdrawal_credential(spec) { post.queue_excess_active_balance(index, spec)?; @@ -137,7 +154,7 @@ pub fn upgrade_state_to_electra( earliest_exit_epoch, consolidation_balance_to_consume: 0, earliest_consolidation_epoch, - pending_balance_deposits: Default::default(), + pending_deposits: Default::default(), pending_partial_withdrawals: Default::default(), pending_consolidations: Default::default(), // Caches diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 223b12e7684..6edd8d38925 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -146,4 +146,4 @@ impl AggregateAndProof { } impl SignedRoot for AggregateAndProof {} -impl<'a, E: EthSpec> SignedRoot for AggregateAndProofRef<'a, E> {} +impl SignedRoot for AggregateAndProofRef<'_, E> {} diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 3801a2b5d2b..190964736fe 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -233,7 +233,7 @@ impl Attestation { } } -impl<'a, E: EthSpec> AttestationRef<'a, E> { +impl AttestationRef<'_, E> { pub fn clone_as_attestation(self) -> Attestation { match self { Self::Base(att) => Attestation::Base(att.clone()), @@ -422,7 +422,7 @@ impl SlotData for Attestation { } } -impl<'a, E: EthSpec> SlotData for AttestationRef<'a, E> { +impl SlotData for AttestationRef<'_, E> { fn get_slot(&self) -> Slot { self.data().slot } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index a2983035138..801b7dd1c78 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs 
@@ -80,10 +80,7 @@ pub struct BeaconBlock = FullPayload pub type BlindedBeaconBlock = BeaconBlock>; impl> SignedRoot for BeaconBlock {} -impl<'a, E: EthSpec, Payload: AbstractExecPayload> SignedRoot - for BeaconBlockRef<'a, E, Payload> -{ -} +impl> SignedRoot for BeaconBlockRef<'_, E, Payload> {} /// Empty block trait for each block variant to implement. pub trait EmptyBlock { diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 1090b2cc031..b896dc46932 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -380,7 +380,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRefMut<'a, } } -impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Payload> { +impl> BeaconBlockBodyRef<'_, E, Payload> { /// Get the fork_name of this object pub fn fork_name(self) -> ForkName { match self { diff --git a/consensus/types/src/beacon_committee.rs b/consensus/types/src/beacon_committee.rs index ad293c3a3bb..bdb91cd6e68 100644 --- a/consensus/types/src/beacon_committee.rs +++ b/consensus/types/src/beacon_committee.rs @@ -7,7 +7,7 @@ pub struct BeaconCommittee<'a> { pub committee: &'a [usize], } -impl<'a> BeaconCommittee<'a> { +impl BeaconCommittee<'_> { pub fn into_owned(self) -> OwnedBeaconCommittee { OwnedBeaconCommittee { slot: self.slot, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 833231dca39..ad4484b86ae 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -59,6 +59,7 @@ pub enum Error { UnknownValidator(usize), UnableToDetermineProducer, InvalidBitfield, + EmptyCommittee, ValidatorIsWithdrawable, ValidatorIsInactive { val_index: usize, @@ -509,7 +510,7 @@ where #[compare_fields(as_iter)] #[test_random(default)] #[superstruct(only(Electra))] - pub pending_balance_deposits: List, + pub pending_deposits: List, #[compare_fields(as_iter)] #[test_random(default)] #[superstruct(only(Electra))] @@ -1547,19 +1548,23 @@ impl BeaconState { .ok_or(Error::UnknownValidator(validator_index)) } + /// Add a validator to the registry and return the validator index that was allocated for it. pub fn add_validator_to_registry( &mut self, - deposit_data: &DepositData, + pubkey: PublicKeyBytes, + withdrawal_credentials: Hash256, + amount: u64, spec: &ChainSpec, - ) -> Result<(), Error> { - let fork = self.fork_name_unchecked(); - let amount = if fork.electra_enabled() { - 0 - } else { - deposit_data.amount - }; - self.validators_mut() - .push(Validator::from_deposit(deposit_data, amount, fork, spec))?; + ) -> Result { + let index = self.validators().len(); + let fork_name = self.fork_name_unchecked(); + self.validators_mut().push(Validator::from_deposit( + pubkey, + withdrawal_credentials, + amount, + fork_name, + spec, + ))?; self.balances_mut().push(amount)?; // Altair or later initializations. @@ -1573,7 +1578,20 @@ impl BeaconState { inactivity_scores.push(0)?; } - Ok(()) + // Keep the pubkey cache up to date if it was up to date prior to this call. + // + // Doing this here while we know the pubkey and index is marginally quicker than doing it in + // a call to `update_pubkey_cache` later because we don't need to index into the validators + // tree again. 
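+ // If the cache has fallen behind the registry we skip the insert here and leave the
+ // rebuild to a later `update_pubkey_cache` call.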
+ let pubkey_cache = self.pubkey_cache_mut(); + if pubkey_cache.len() == index { + let success = pubkey_cache.insert(pubkey, index); + if !success { + return Err(Error::PubkeyCacheInconsistent); + } + } + + Ok(index) } /// Safe copy-on-write accessor for the `validators` list. @@ -1780,19 +1798,6 @@ impl BeaconState { } } - /// Get the number of outstanding deposits. - /// - /// Returns `Err` if the state is invalid. - pub fn get_outstanding_deposit_len(&self) -> Result { - self.eth1_data() - .deposit_count - .checked_sub(self.eth1_deposit_index()) - .ok_or(Error::InvalidDepositState { - deposit_count: self.eth1_data().deposit_count, - deposit_index: self.eth1_deposit_index(), - }) - } - /// Build all caches (except the tree hash cache), if they need to be built. pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { self.build_all_committee_caches(spec)?; @@ -2149,27 +2154,6 @@ impl BeaconState { .map_err(Into::into) } - /// Get active balance for the given `validator_index`. - pub fn get_active_balance( - &self, - validator_index: usize, - spec: &ChainSpec, - current_fork: ForkName, - ) -> Result { - let max_effective_balance = self - .validators() - .get(validator_index) - .map(|validator| validator.get_max_effective_balance(spec, current_fork)) - .ok_or(Error::UnknownValidator(validator_index))?; - Ok(std::cmp::min( - *self - .balances() - .get(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?, - max_effective_balance, - )) - } - pub fn get_pending_balance_to_withdraw(&self, validator_index: usize) -> Result { let mut pending_balance = 0; for withdrawal in self @@ -2196,42 +2180,18 @@ impl BeaconState { if *balance > spec.min_activation_balance { let excess_balance = balance.safe_sub(spec.min_activation_balance)?; *balance = spec.min_activation_balance; - self.pending_balance_deposits_mut()? - .push(PendingBalanceDeposit { - index: validator_index as u64, - amount: excess_balance, - })?; + let validator = self.get_validator(validator_index)?.clone(); + self.pending_deposits_mut()?.push(PendingDeposit { + pubkey: validator.pubkey, + withdrawal_credentials: validator.withdrawal_credentials, + amount: excess_balance, + signature: Signature::infinity()?.into(), + slot: spec.genesis_slot, + })?; } Ok(()) } - pub fn queue_entire_balance_and_reset_validator( - &mut self, - validator_index: usize, - spec: &ChainSpec, - ) -> Result<(), Error> { - let balance = self - .balances_mut() - .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; - let balance_copy = *balance; - *balance = 0_u64; - - let validator = self - .validators_mut() - .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; - validator.effective_balance = 0; - validator.activation_eligibility_epoch = spec.far_future_epoch; - - self.pending_balance_deposits_mut()? - .push(PendingBalanceDeposit { - index: validator_index as u64, - amount: balance_copy, - }) - .map_err(Into::into) - } - /// Change the withdrawal prefix of the given `validator_index` to the compounding withdrawal validator prefix. 
pub fn switch_to_compounding_validator( &mut self, @@ -2242,12 +2202,10 @@ impl BeaconState { .validators_mut() .get_mut(validator_index) .ok_or(Error::UnknownValidator(validator_index))?; - if validator.has_eth1_withdrawal_credential(spec) { - AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = - spec.compounding_withdrawal_prefix_byte; + AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = + spec.compounding_withdrawal_prefix_byte; - self.queue_excess_active_balance(validator_index, spec)?; - } + self.queue_excess_active_balance(validator_index, spec)?; Ok(()) } diff --git a/consensus/types/src/beacon_state/iter.rs b/consensus/types/src/beacon_state/iter.rs index 2caa0365e01..d99c769e402 100644 --- a/consensus/types/src/beacon_state/iter.rs +++ b/consensus/types/src/beacon_state/iter.rs @@ -27,7 +27,7 @@ impl<'a, E: EthSpec> BlockRootsIter<'a, E> { } } -impl<'a, E: EthSpec> Iterator for BlockRootsIter<'a, E> { +impl Iterator for BlockRootsIter<'_, E> { type Item = Result<(Slot, Hash256), Error>; fn next(&mut self) -> Option { diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 3ad3ccf5617..bfa7bb86d24 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -307,43 +307,6 @@ mod committees { } } -mod get_outstanding_deposit_len { - use super::*; - - async fn state() -> BeaconState { - get_harness(16, Slot::new(0)) - .await - .chain - .head_beacon_state_cloned() - } - - #[tokio::test] - async fn returns_ok() { - let mut state = state().await; - assert_eq!(state.get_outstanding_deposit_len(), Ok(0)); - - state.eth1_data_mut().deposit_count = 17; - *state.eth1_deposit_index_mut() = 16; - assert_eq!(state.get_outstanding_deposit_len(), Ok(1)); - } - - #[tokio::test] - async fn returns_err_if_the_state_is_invalid() { - let mut state = state().await; - // The state is invalid, deposit count is lower than deposit index. 
- state.eth1_data_mut().deposit_count = 16; - *state.eth1_deposit_index_mut() = 17; - - assert_eq!( - state.get_outstanding_deposit_len(), - Err(BeaconStateError::InvalidDepositState { - deposit_count: 16, - deposit_index: 17, - }) - ); - } -} - #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 5a330388cce..302aa2a4c18 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,9 +1,9 @@ use crate::test_utils::TestRandom; -use crate::ForkName; use crate::{ beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, Epoch, EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList, }; +use crate::{AbstractExecPayload, ForkName}; use crate::{ForkVersionDeserialize, KzgProofs, SignedBeaconBlock}; use bls::Signature; use derivative::Derivative; @@ -150,10 +150,10 @@ impl BlobSidecar { }) } - pub fn new_with_existing_proof( + pub fn new_with_existing_proof>( index: usize, blob: Blob, - signed_block: &SignedBeaconBlock, + signed_block: &SignedBeaconBlock, signed_block_header: SignedBeaconBlockHeader, kzg_commitments_inclusion_proof: &[Hash256], kzg_proof: KzgProof, diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 1c4effb4aec..0b33a76ff19 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -127,6 +127,11 @@ pub struct ChainSpec { pub deposit_network_id: u64, pub deposit_contract_address: Address, + /* + * Execution Specs + */ + pub gas_limit_adjustment_factor: u64, + /* * Altair hard fork params */ @@ -204,7 +209,6 @@ pub struct ChainSpec { pub target_aggregators_per_committee: u64, pub gossip_max_size: u64, pub max_request_blocks: u64, - pub epochs_per_subnet_subscription: u64, pub min_epochs_for_block_requests: u64, pub max_chunk_size: u64, pub ttfb_timeout: u64, @@ -215,9 +219,7 @@ pub struct ChainSpec { pub message_domain_valid_snappy: [u8; 4], pub subnets_per_node: u8, pub attestation_subnet_count: u64, - pub attestation_subnet_extra_bits: u8, pub attestation_subnet_prefix_bits: u8, - pub attestation_subnet_shuffling_prefix_bits: u8, /* * Networking Deneb @@ -718,6 +720,11 @@ impl ChainSpec { .parse() .expect("chain spec deposit contract address"), + /* + * Execution Specs + */ + gas_limit_adjustment_factor: 1024, + /* * Altair hard fork params */ @@ -816,7 +823,6 @@ impl ChainSpec { subnets_per_node: 2, maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - epochs_per_subnet_subscription: default_epochs_per_subnet_subscription(), gossip_max_size: default_gossip_max_size(), min_epochs_for_block_requests: default_min_epochs_for_block_requests(), max_chunk_size: default_max_chunk_size(), @@ -824,10 +830,7 @@ impl ChainSpec { resp_timeout: default_resp_timeout(), message_domain_invalid_snappy: default_message_domain_invalid_snappy(), message_domain_valid_snappy: default_message_domain_valid_snappy(), - attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), - attestation_subnet_shuffling_prefix_bits: - default_attestation_subnet_shuffling_prefix_bits(), max_request_blocks: default_max_request_blocks(), /* @@ -1036,6 +1039,11 @@ impl ChainSpec { .parse() .expect("chain spec deposit contract address"), + /* + * Execution Specs + */ + gas_limit_adjustment_factor: 1024, + /* * 
Altair hard fork params */ @@ -1133,7 +1141,6 @@ impl ChainSpec { subnets_per_node: 4, // Make this larger than usual to avoid network damage maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - epochs_per_subnet_subscription: default_epochs_per_subnet_subscription(), gossip_max_size: default_gossip_max_size(), min_epochs_for_block_requests: 33024, max_chunk_size: default_max_chunk_size(), @@ -1141,11 +1148,8 @@ impl ChainSpec { resp_timeout: default_resp_timeout(), message_domain_invalid_snappy: default_message_domain_invalid_snappy(), message_domain_valid_snappy: default_message_domain_valid_snappy(), - attestation_subnet_extra_bits: default_attestation_subnet_extra_bits(), - attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), - attestation_subnet_shuffling_prefix_bits: - default_attestation_subnet_shuffling_prefix_bits(), max_request_blocks: default_max_request_blocks(), + attestation_subnet_prefix_bits: default_attestation_subnet_prefix_bits(), /* * Networking Deneb Specific @@ -1296,15 +1300,16 @@ pub struct Config { #[serde(with = "serde_utils::address_hex")] deposit_contract_address: Address, + #[serde(default = "default_gas_limit_adjustment_factor")] + #[serde(with = "serde_utils::quoted_u64")] + gas_limit_adjustment_factor: u64, + #[serde(default = "default_gossip_max_size")] #[serde(with = "serde_utils::quoted_u64")] gossip_max_size: u64, #[serde(default = "default_max_request_blocks")] #[serde(with = "serde_utils::quoted_u64")] max_request_blocks: u64, - #[serde(default = "default_epochs_per_subnet_subscription")] - #[serde(with = "serde_utils::quoted_u64")] - epochs_per_subnet_subscription: u64, #[serde(default = "default_min_epochs_for_block_requests")] #[serde(with = "serde_utils::quoted_u64")] min_epochs_for_block_requests: u64, @@ -1329,15 +1334,9 @@ pub struct Config { #[serde(default = "default_message_domain_valid_snappy")] #[serde(with = "serde_utils::bytes_4_hex")] message_domain_valid_snappy: [u8; 4], - #[serde(default = "default_attestation_subnet_extra_bits")] - #[serde(with = "serde_utils::quoted_u8")] - attestation_subnet_extra_bits: u8, #[serde(default = "default_attestation_subnet_prefix_bits")] #[serde(with = "serde_utils::quoted_u8")] attestation_subnet_prefix_bits: u8, - #[serde(default = "default_attestation_subnet_shuffling_prefix_bits")] - #[serde(with = "serde_utils::quoted_u8")] - attestation_subnet_shuffling_prefix_bits: u8, #[serde(default = "default_max_request_blocks_deneb")] #[serde(with = "serde_utils::quoted_u64")] max_request_blocks_deneb: u64, @@ -1419,10 +1418,18 @@ fn default_subnets_per_node() -> u8 { 2u8 } +fn default_attestation_subnet_prefix_bits() -> u8 { + 6 +} + const fn default_max_per_epoch_activation_churn_limit() -> u64 { 8 } +const fn default_gas_limit_adjustment_factor() -> u64 { + 1024 +} + const fn default_gossip_max_size() -> u64 { 10485760 } @@ -1451,18 +1458,6 @@ const fn default_message_domain_valid_snappy() -> [u8; 4] { [1, 0, 0, 0] } -const fn default_attestation_subnet_extra_bits() -> u8 { - 0 -} - -const fn default_attestation_subnet_prefix_bits() -> u8 { - 6 -} - -const fn default_attestation_subnet_shuffling_prefix_bits() -> u8 { - 3 -} - const fn default_max_request_blocks() -> u64 { 1024 } @@ -1495,10 +1490,6 @@ const fn default_max_per_epoch_activation_exit_churn_limit() -> u64 { 256_000_000_000 } -const fn default_epochs_per_subnet_subscription() -> u64 { - 256 -} - const fn default_attestation_propagation_slot_range() 
-> u64 { 32 } @@ -1676,6 +1667,7 @@ impl Config { shard_committee_period: spec.shard_committee_period, eth1_follow_distance: spec.eth1_follow_distance, subnets_per_node: spec.subnets_per_node, + attestation_subnet_prefix_bits: spec.attestation_subnet_prefix_bits, inactivity_score_bias: spec.inactivity_score_bias, inactivity_score_recovery_rate: spec.inactivity_score_recovery_rate, @@ -1690,9 +1682,10 @@ impl Config { deposit_network_id: spec.deposit_network_id, deposit_contract_address: spec.deposit_contract_address, + gas_limit_adjustment_factor: spec.gas_limit_adjustment_factor, + gossip_max_size: spec.gossip_max_size, max_request_blocks: spec.max_request_blocks, - epochs_per_subnet_subscription: spec.epochs_per_subnet_subscription, min_epochs_for_block_requests: spec.min_epochs_for_block_requests, max_chunk_size: spec.max_chunk_size, ttfb_timeout: spec.ttfb_timeout, @@ -1701,9 +1694,6 @@ impl Config { maximum_gossip_clock_disparity_millis: spec.maximum_gossip_clock_disparity_millis, message_domain_invalid_snappy: spec.message_domain_invalid_snappy, message_domain_valid_snappy: spec.message_domain_valid_snappy, - attestation_subnet_extra_bits: spec.attestation_subnet_extra_bits, - attestation_subnet_prefix_bits: spec.attestation_subnet_prefix_bits, - attestation_subnet_shuffling_prefix_bits: spec.attestation_subnet_shuffling_prefix_bits, max_request_blocks_deneb: spec.max_request_blocks_deneb, max_request_blob_sidecars: spec.max_request_blob_sidecars, max_request_data_column_sidecars: spec.max_request_data_column_sidecars, @@ -1757,6 +1747,7 @@ impl Config { shard_committee_period, eth1_follow_distance, subnets_per_node, + attestation_subnet_prefix_bits, inactivity_score_bias, inactivity_score_recovery_rate, ejection_balance, @@ -1767,6 +1758,7 @@ impl Config { deposit_chain_id, deposit_network_id, deposit_contract_address, + gas_limit_adjustment_factor, gossip_max_size, min_epochs_for_block_requests, max_chunk_size, @@ -1774,11 +1766,7 @@ impl Config { resp_timeout, message_domain_invalid_snappy, message_domain_valid_snappy, - attestation_subnet_extra_bits, - attestation_subnet_prefix_bits, - attestation_subnet_shuffling_prefix_bits, max_request_blocks, - epochs_per_subnet_subscription, attestation_propagation_slot_range, maximum_gossip_clock_disparity_millis, max_request_blocks_deneb, @@ -1832,6 +1820,7 @@ impl Config { deposit_chain_id, deposit_network_id, deposit_contract_address, + gas_limit_adjustment_factor, terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, @@ -1842,11 +1831,8 @@ impl Config { resp_timeout, message_domain_invalid_snappy, message_domain_valid_snappy, - attestation_subnet_extra_bits, attestation_subnet_prefix_bits, - attestation_subnet_shuffling_prefix_bits, max_request_blocks, - epochs_per_subnet_subscription, attestation_propagation_slot_range, maximum_gossip_clock_disparity_millis, max_request_blocks_deneb, @@ -2142,9 +2128,7 @@ mod yaml_tests { check_default!(resp_timeout); check_default!(message_domain_invalid_snappy); check_default!(message_domain_valid_snappy); - check_default!(attestation_subnet_extra_bits); check_default!(attestation_subnet_prefix_bits); - check_default!(attestation_subnet_shuffling_prefix_bits); assert_eq!(chain_spec.bellatrix_fork_epoch, None); } diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit_request.rs index 7af949fef3a..a21760551b5 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit_request.rs @@ -1,5 +1,6 @@ use 
crate::test_utils::TestRandom; -use crate::{Hash256, PublicKeyBytes, Signature}; +use crate::{Hash256, PublicKeyBytes}; +use bls::SignatureBytes; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -10,7 +11,6 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, - Eq, Hash, Clone, Serialize, @@ -25,7 +25,7 @@ pub struct DepositRequest { pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, - pub signature: Signature, + pub signature: SignatureBytes, #[serde(with = "serde_utils::quoted_u64")] pub index: u64, } @@ -36,7 +36,7 @@ impl DepositRequest { pubkey: PublicKeyBytes::empty(), withdrawal_credentials: Hash256::ZERO, amount: 0, - signature: Signature::empty(), + signature: SignatureBytes::empty(), index: 0, } .as_ssz_bytes() diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 09ef8e3c1a7..23e82762096 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -151,7 +151,7 @@ pub trait EthSpec: /* * New in Electra */ - type PendingBalanceDepositsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type PendingDepositsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type PendingPartialWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type PendingConsolidationsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxConsolidationRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; @@ -159,6 +159,7 @@ pub trait EthSpec: type MaxAttesterSlashingsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxAttestationsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxWithdrawalRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxPendingDepositsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; fn default_spec() -> ChainSpec; @@ -331,9 +332,9 @@ pub trait EthSpec: .expect("Preset values are not configurable and never result in non-positive block body depth") } - /// Returns the `PENDING_BALANCE_DEPOSITS_LIMIT` constant for this specification. - fn pending_balance_deposits_limit() -> usize { - Self::PendingBalanceDepositsLimit::to_usize() + /// Returns the `PENDING_DEPOSITS_LIMIT` constant for this specification. + fn pending_deposits_limit() -> usize { + Self::PendingDepositsLimit::to_usize() } /// Returns the `PENDING_PARTIAL_WITHDRAWALS_LIMIT` constant for this specification. @@ -371,6 +372,11 @@ pub trait EthSpec: Self::MaxWithdrawalRequestsPerPayload::to_usize() } + /// Returns the `MAX_PENDING_DEPOSITS_PER_EPOCH` constant for this specification. 
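+ /// (Set to 16 for both the mainnet and Gnosis presets elsewhere in this diff.)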
+ fn max_pending_deposits_per_epoch() -> usize { + Self::MaxPendingDepositsPerEpoch::to_usize() + } + fn kzg_commitments_inclusion_proof_depth() -> usize { Self::KzgCommitmentsInclusionProofDepth::to_usize() } @@ -430,7 +436,7 @@ impl EthSpec for MainnetEthSpec { type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U16; - type PendingBalanceDepositsLimit = U134217728; + type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; type MaxConsolidationRequestsPerPayload = U1; @@ -438,6 +444,7 @@ impl EthSpec for MainnetEthSpec { type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; + type MaxPendingDepositsPerEpoch = U16; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -500,7 +507,8 @@ impl EthSpec for MinimalEthSpec { MaxBlsToExecutionChanges, MaxBlobsPerBlock, BytesPerFieldElement, - PendingBalanceDepositsLimit, + PendingDepositsLimit, + MaxPendingDepositsPerEpoch, MaxConsolidationRequestsPerPayload, MaxAttesterSlashingsElectra, MaxAttestationsElectra @@ -557,7 +565,7 @@ impl EthSpec for GnosisEthSpec { type BytesPerFieldElement = U32; type BytesPerBlob = U131072; type KzgCommitmentInclusionProofDepth = U17; - type PendingBalanceDepositsLimit = U134217728; + type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; type MaxConsolidationRequestsPerPayload = U1; @@ -565,6 +573,7 @@ impl EthSpec for GnosisEthSpec { type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; + type MaxPendingDepositsPerEpoch = U16; type FieldElementsPerCell = U64; type FieldElementsPerExtBlob = U8192; type BytesPerCell = U2048; diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution_block_header.rs index 694162d6ffd..60f2960afbe 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution_block_header.rs @@ -52,9 +52,11 @@ pub struct ExecutionBlockHeader { pub blob_gas_used: Option, pub excess_blob_gas: Option, pub parent_beacon_block_root: Option, + pub requests_root: Option, } impl ExecutionBlockHeader { + #[allow(clippy::too_many_arguments)] pub fn from_payload( payload: ExecutionPayloadRef, rlp_empty_list_root: Hash256, @@ -63,6 +65,7 @@ impl ExecutionBlockHeader { rlp_blob_gas_used: Option, rlp_excess_blob_gas: Option, rlp_parent_beacon_block_root: Option, + rlp_requests_root: Option, ) -> Self { // Most of these field mappings are defined in EIP-3675 except for `mixHash`, which is // defined in EIP-4399. 
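The `requests_root` added to the execution block header in the next hunks carries the EIP-7685 commitment that `ExecutionRequests::requests_hash` (added further down in this diff) computes as sha256 over per-type hashes. A minimal standalone recomputation of that scheme, using the `sha2` crate purely for the sketch:

```rust
use sha2::{Digest, Sha256};

/// Hash a list of (request_type, ssz_bytes) pairs per EIP-7685, mirroring the
/// loop in `ExecutionRequests::requests_hash`:
/// `sha256(sha256(type_0 ++ requests_0) ++ sha256(type_1 ++ requests_1) ++ ...)`.
fn requests_hash(requests: &[(u8, Vec<u8>)]) -> [u8; 32] {
    let mut outer = Sha256::new();
    for (request_type, data) in requests {
        let mut inner = Sha256::new();
        inner.update([*request_type]);
        inner.update(data);
        outer.update(inner.finalize());
    }
    outer.finalize().into()
}

fn main() {
    // Real callers pass the SSZ bytes of the deposit, withdrawal and
    // consolidation request lists; empty data still hashes deterministically.
    let h = requests_hash(&[(0u8, vec![]), (1, vec![]), (2, vec![])]);
    assert_eq!(h.len(), 32);
}
```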
@@ -87,6 +90,7 @@ impl ExecutionBlockHeader { blob_gas_used: rlp_blob_gas_used, excess_blob_gas: rlp_excess_blob_gas, parent_beacon_block_root: rlp_parent_beacon_block_root, + requests_root: rlp_requests_root, } } } @@ -114,6 +118,7 @@ pub struct EncodableExecutionBlockHeader<'a> { pub blob_gas_used: Option, pub excess_blob_gas: Option, pub parent_beacon_block_root: Option<&'a [u8]>, + pub requests_root: Option<&'a [u8]>, } impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { @@ -139,6 +144,7 @@ impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, parent_beacon_block_root: None, + requests_root: None, }; if let Some(withdrawals_root) = &header.withdrawals_root { encodable.withdrawals_root = Some(withdrawals_root.as_slice()); @@ -146,6 +152,9 @@ impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { if let Some(parent_beacon_block_root) = &header.parent_beacon_block_root { encodable.parent_beacon_block_root = Some(parent_beacon_block_root.as_slice()) } + if let Some(requests_root) = &header.requests_root { + encodable.requests_root = Some(requests_root.as_slice()) + } encodable } } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index e9690435f1f..4bfbfee9bf0 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -371,7 +371,7 @@ impl TryFrom> for ExecutionPayloadHeaderDe } } -impl<'a, E: EthSpec> ExecutionPayloadHeaderRefMut<'a, E> { +impl ExecutionPayloadHeaderRefMut<'_, E> { /// Mutate through pub fn replace(self, header: ExecutionPayloadHeader) -> Result<(), BeaconStateError> { match self { diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs index 778260dd841..96a39054207 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution_requests.rs @@ -1,7 +1,8 @@ use crate::test_utils::TestRandom; -use crate::{ConsolidationRequest, DepositRequest, EthSpec, WithdrawalRequest}; +use crate::{ConsolidationRequest, DepositRequest, EthSpec, Hash256, WithdrawalRequest}; use alloy_primitives::Bytes; use derivative::Derivative; +use ethereum_hashing::{DynamicContext, Sha256Context}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -47,6 +48,43 @@ impl ExecutionRequests { let consolidation_bytes = Bytes::from(self.consolidations.as_ssz_bytes()); vec![deposit_bytes, withdrawal_bytes, consolidation_bytes] } + + /// Generate the execution layer `requests_hash` based on EIP-7685. + /// + /// `sha256(sha256(requests_0) ++ sha256(requests_1) ++ ...)` + pub fn requests_hash(&self) -> Hash256 { + let mut hasher = DynamicContext::new(); + + for (i, request) in self.get_execution_requests_list().iter().enumerate() { + let mut request_hasher = DynamicContext::new(); + request_hasher.update(&[i as u8]); + request_hasher.update(request); + let request_hash = request_hasher.finalize(); + + hasher.update(&request_hash); + } + + hasher.finalize().into() + } +} + +/// This is used to index into the `execution_requests` array. 
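+/// The variants follow the EIP-7685 request-type bytes: 0x00 deposit requests (EIP-6110),
+/// 0x01 withdrawal requests (EIP-7002) and 0x02 consolidation requests (EIP-7251).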
+#[derive(Debug, Copy, Clone)] +pub enum RequestPrefix { + Deposit, + Withdrawal, + Consolidation, +} + +impl RequestPrefix { + pub fn from_prefix(prefix: u8) -> Option { + match prefix { + 0 => Some(Self::Deposit), + 1 => Some(Self::Withdrawal), + 2 => Some(Self::Consolidation), + _ => None, + } + } } #[cfg(test)] diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 9274600ed2c..f3243a9f05e 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -134,7 +134,7 @@ impl IndexedAttestation { } } -impl<'a, E: EthSpec> IndexedAttestationRef<'a, E> { +impl IndexedAttestationRef<'_, E> { pub fn is_double_vote(&self, other: Self) -> bool { self.data().target.epoch == other.data().target.epoch && self.data() != other.data() } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index eff52378342..dd304c6296c 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -54,8 +54,8 @@ pub mod light_client_finality_update; pub mod light_client_optimistic_update; pub mod light_client_update; pub mod pending_attestation; -pub mod pending_balance_deposit; pub mod pending_consolidation; +pub mod pending_deposit; pub mod pending_partial_withdrawal; pub mod proposer_preparation_data; pub mod proposer_slashing; @@ -170,7 +170,7 @@ pub use crate::execution_payload_header::{ ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; -pub use crate::execution_requests::ExecutionRequests; +pub use crate::execution_requests::{ExecutionRequests, RequestPrefix}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; @@ -210,8 +210,8 @@ pub use crate::payload::{ FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; -pub use crate::pending_balance_deposit::PendingBalanceDeposit; pub use crate::pending_consolidation::PendingConsolidation; +pub use crate::pending_deposit::PendingDeposit; pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; pub use crate::preset::{ AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index 52800f18ac2..6655e0a093b 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -179,12 +179,12 @@ impl LightClientHeaderCapella { .to_ref() .block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; - return Ok(LightClientHeaderCapella { + Ok(LightClientHeaderCapella { beacon: block.message().block_header(), execution: header, execution_branch: FixedVector::new(execution_branch)?, _phantom_data: PhantomData, - }); + }) } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index a7ddf8eb314..c3a50e71c15 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -418,7 +418,7 @@ impl LightClientUpdate { return Ok(new_attested_header_slot < prev_attested_header_slot); } - return Ok(new.signature_slot() < self.signature_slot()); + Ok(new.signature_slot() < self.signature_slot()) } fn is_next_sync_committee_branch_empty<'a>(&'a self) -> bool { diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 80a70c171f5..e68801840af 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ 
-32,6 +32,7 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn prev_randao(&self) -> Hash256; fn block_number(&self) -> u64; fn timestamp(&self) -> u64; + fn extra_data(&self) -> VariableList; fn block_hash(&self) -> ExecutionBlockHash; fn fee_recipient(&self) -> Address; fn gas_limit(&self) -> u64; @@ -225,6 +226,13 @@ impl ExecPayload for FullPayload { }) } + fn extra_data<'a>(&'a self) -> VariableList { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -317,7 +325,7 @@ impl<'a, E: EthSpec> FullPayloadRef<'a, E> { } } -impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { +impl ExecPayload for FullPayloadRef<'_, E> { fn block_type() -> BlockType { BlockType::Full } @@ -357,6 +365,13 @@ impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { }) } + fn extra_data<'a>(&'a self) -> VariableList { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -542,6 +557,13 @@ impl ExecPayload for BlindedPayload { }) } + fn extra_data<'a>(&'a self) -> VariableList::MaxExtraDataBytes> { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -643,6 +665,13 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { }) } + fn extra_data<'a>(&'a self) -> VariableList::MaxExtraDataBytes> { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_blinded_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -745,6 +774,10 @@ macro_rules! 
impl_exec_payload_common { self.$wrapped_field.timestamp } + fn extra_data(&self) -> VariableList { + self.$wrapped_field.extra_data.clone() + } + fn block_hash(&self) -> ExecutionBlockHash { self.$wrapped_field.block_hash } diff --git a/consensus/types/src/pending_balance_deposit.rs b/consensus/types/src/pending_deposit.rs similarity index 68% rename from consensus/types/src/pending_balance_deposit.rs rename to consensus/types/src/pending_deposit.rs index a2bce577f87..3bee86417de 100644 --- a/consensus/types/src/pending_balance_deposit.rs +++ b/consensus/types/src/pending_deposit.rs @@ -1,4 +1,5 @@ use crate::test_utils::TestRandom; +use crate::*; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -8,7 +9,6 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, - Eq, Hash, Clone, Serialize, @@ -18,16 +18,18 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] -pub struct PendingBalanceDeposit { - #[serde(with = "serde_utils::quoted_u64")] - pub index: u64, +pub struct PendingDeposit { + pub pubkey: PublicKeyBytes, + pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, + pub signature: SignatureBytes, + pub slot: Slot, } #[cfg(test)] mod tests { use super::*; - ssz_and_tree_hash_tests!(PendingBalanceDeposit); + ssz_and_tree_hash_tests!(PendingDeposit); } diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 435a74bdc35..b469b7b777a 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -263,7 +263,7 @@ impl ElectraPreset { whistleblower_reward_quotient_electra: spec.whistleblower_reward_quotient_electra, max_pending_partials_per_withdrawals_sweep: spec .max_pending_partials_per_withdrawals_sweep, - pending_balance_deposits_limit: E::pending_balance_deposits_limit() as u64, + pending_balance_deposits_limit: E::pending_deposits_limit() as u64, pending_partial_withdrawals_limit: E::pending_partial_withdrawals_limit() as u64, pending_consolidations_limit: E::pending_consolidations_limit() as u64, max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 8c8f2d073dd..0391756047e 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -133,7 +133,7 @@ pub struct SlotIter<'a> { slots_per_epoch: u64, } -impl<'a> Iterator for SlotIter<'a> { +impl Iterator for SlotIter<'_> { type Item = Slot; fn next(&mut self) -> Option { diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 9bfe6fb261c..187b070d29f 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,14 +1,17 @@ //! Identifies each shard by an integer identifier. -use crate::{AttestationRef, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot}; +use crate::{AttestationRef, ChainSpec, CommitteeIndex, EthSpec, Slot}; use alloy_primitives::{bytes::Buf, U256}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; use std::sync::LazyLock; -use swap_or_not_shuffle::compute_shuffled_index; const MAX_SUBNET_ID: usize = 64; +/// The number of bits in a Discovery `NodeId`. This is used for binary operations on the node-id +/// data. 
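+/// (A discv5 node ID is 32 bytes, hence 256 bits.)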
+const NODE_ID_BITS: u64 = 256; + static SUBNET_ID_TO_STRING: LazyLock> = LazyLock::new(|| { let mut v = Vec::with_capacity(MAX_SUBNET_ID); @@ -74,52 +77,22 @@ impl SubnetId { .into()) } - /// Computes the set of subnets the node should be subscribed to during the current epoch, - /// along with the first epoch in which these subscriptions are no longer valid. + /// Computes the set of subnets the node should be subscribed to. We subscribe to these subnets + /// for the duration of the node's runtime. #[allow(clippy::arithmetic_side_effects)] - pub fn compute_subnets_for_epoch( + pub fn compute_attestation_subnets( raw_node_id: [u8; 32], - epoch: Epoch, spec: &ChainSpec, - ) -> Result<(impl Iterator, Epoch), &'static str> { - // simplify variable naming - let subscription_duration = spec.epochs_per_subnet_subscription; + ) -> impl Iterator { + // The bits of the node-id we are using to define the subnets. let prefix_bits = spec.attestation_subnet_prefix_bits as u64; - let shuffling_prefix_bits = spec.attestation_subnet_shuffling_prefix_bits as u64; - let node_id = U256::from_be_slice(&raw_node_id); + let node_id = U256::from_be_slice(&raw_node_id); // calculate the prefixes used to compute the subnet and shuffling - let node_id_prefix = (node_id >> (256 - prefix_bits)).as_le_slice().get_u64_le(); - let shuffling_prefix = (node_id >> (256 - (prefix_bits + shuffling_prefix_bits))) + let node_id_prefix = (node_id >> (NODE_ID_BITS - prefix_bits)) .as_le_slice() .get_u64_le(); - // number of groups the shuffling creates - let shuffling_groups = 1 << shuffling_prefix_bits; - // shuffling group for this node - let shuffling_bits = shuffling_prefix % shuffling_groups; - let epoch_transition = (node_id_prefix - + (shuffling_bits * (subscription_duration >> shuffling_prefix_bits))) - % subscription_duration; - - // Calculate at which epoch this node needs to re-evaluate - let valid_until_epoch = epoch.as_u64() - + subscription_duration - .saturating_sub((epoch.as_u64() + epoch_transition) % subscription_duration); - - let subscription_event_idx = (epoch.as_u64() + epoch_transition) / subscription_duration; - let permutation_seed = - ethereum_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); - - let num_subnets = 1 << spec.attestation_subnet_prefix_bits; - let permutated_prefix = compute_shuffled_index( - node_id_prefix as usize, - num_subnets, - &permutation_seed, - spec.shuffle_round_count, - ) - .ok_or("Unable to shuffle")? as u64; - // Get the constants we need to avoid holding a reference to the spec let &ChainSpec { subnets_per_node, @@ -127,10 +100,8 @@ impl SubnetId { .. 
} = spec; - let subnet_set_generator = (0..subnets_per_node).map(move |idx| { - SubnetId::new((permutated_prefix + idx as u64) % attestation_subnet_count) - }); - Ok((subnet_set_generator, valid_until_epoch.into())) + (0..subnets_per_node) + .map(move |idx| SubnetId::new((node_id_prefix + idx as u64) % attestation_subnet_count)) } } @@ -180,7 +151,7 @@ mod tests { /// A set of tests compared to the python specification #[test] - fn compute_subnets_for_epoch_unit_test() { + fn compute_attestation_subnets_test() { // Randomized variables used generated with the python specification let node_ids = [ "0", @@ -189,59 +160,34 @@ mod tests { "27726842142488109545414954493849224833670205008410190955613662332153332462900", "39755236029158558527862903296867805548949739810920318269566095185775868999998", "31899136003441886988955119620035330314647133604576220223892254902004850516297", - "58579998103852084482416614330746509727562027284701078483890722833654510444626", - "28248042035542126088870192155378394518950310811868093527036637864276176517397", - "60930578857433095740782970114409273483106482059893286066493409689627770333527", - "103822458477361691467064888613019442068586830412598673713899771287914656699997", ] .map(|v| Uint256::from_str_radix(v, 10).unwrap().to_be_bytes::<32>()); - let epochs = [ - 54321u64, 1017090249, 1827566880, 846255942, 766597383, 1204990115, 1616209495, - 1774367616, 1484598751, 3525502229, - ] - .map(Epoch::from); + let expected_subnets = [ + vec![0, 1], + vec![49u64, 50u64], + vec![10, 11], + vec![15, 16], + vec![21, 22], + vec![17, 18], + ]; // Test mainnet let spec = ChainSpec::mainnet(); - // Calculated by hand - let expected_valid_time = [ - 54528u64, 1017090255, 1827567030, 846256049, 766597387, 1204990287, 1616209536, - 1774367857, 1484598847, 3525502311, - ]; - - // Calculated from pyspec - let expected_subnets = [ - vec![4u64, 5u64], - vec![31, 32], - vec![39, 40], - vec![38, 39], - vec![53, 54], - vec![57, 58], - vec![48, 49], - vec![1, 2], - vec![34, 35], - vec![37, 38], - ]; - for x in 0..node_ids.len() { println!("Test: {}", x); println!( - "NodeId: {:?}\n Epoch: {}\n, expected_update_time: {}\n, expected_subnets: {:?}", - node_ids[x], epochs[x], expected_valid_time[x], expected_subnets[x] + "NodeId: {:?}\nExpected_subnets: {:?}", + node_ids[x], expected_subnets[x] ); - let (computed_subnets, valid_time) = SubnetId::compute_subnets_for_epoch::< - crate::MainnetEthSpec, - >(node_ids[x], epochs[x], &spec) - .unwrap(); + let computed_subnets = SubnetId::compute_attestation_subnets(node_ids[x], &spec); assert_eq!( expected_subnets[x], computed_subnets.map(SubnetId::into).collect::>() ); - assert_eq!(Epoch::from(expected_valid_time[x]), valid_time); } } } diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 275101ddbe1..222b9292a2a 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, DepositData, Epoch, - EthSpec, FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, + FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -38,14 +38,15 @@ pub struct Validator { impl Validator { #[allow(clippy::arithmetic_side_effects)] pub fn from_deposit( - deposit_data: &DepositData, + pubkey: PublicKeyBytes, + withdrawal_credentials: Hash256, amount: 
diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs
index 275101ddbe1..222b9292a2a 100644
--- a/consensus/types/src/validator.rs
+++ b/consensus/types/src/validator.rs
@@ -1,6 +1,6 @@
 use crate::{
-    test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, DepositData, Epoch,
-    EthSpec, FixedBytesExtended, ForkName, Hash256, PublicKeyBytes,
+    test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec,
+    FixedBytesExtended, ForkName, Hash256, PublicKeyBytes,
 };
 use serde::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
@@ -38,14 +38,15 @@ pub struct Validator {
 
 impl Validator {
     #[allow(clippy::arithmetic_side_effects)]
     pub fn from_deposit(
-        deposit_data: &DepositData,
+        pubkey: PublicKeyBytes,
+        withdrawal_credentials: Hash256,
         amount: u64,
         fork_name: ForkName,
         spec: &ChainSpec,
     ) -> Self {
         let mut validator = Validator {
-            pubkey: deposit_data.pubkey,
-            withdrawal_credentials: deposit_data.withdrawal_credentials,
+            pubkey,
+            withdrawal_credentials,
             activation_eligibility_epoch: spec.far_future_epoch,
             activation_epoch: spec.far_future_epoch,
             exit_epoch: spec.far_future_epoch,
@@ -291,16 +292,6 @@ impl Validator {
             spec.max_effective_balance
         }
     }
-
-    pub fn get_active_balance(
-        &self,
-        validator_balance: u64,
-        spec: &ChainSpec,
-        current_fork: ForkName,
-    ) -> u64 {
-        let max_effective_balance = self.get_max_effective_balance(spec, current_fork);
-        std::cmp::min(validator_balance, max_effective_balance)
-    }
 }
 
 impl Default for Validator {
diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml
index b65b51230c3..d02e01b80cc 100644
--- a/crypto/bls/Cargo.toml
+++ b/crypto/bls/Cargo.toml
@@ -6,18 +6,18 @@ edition = { workspace = true }
 
 [dependencies]
 alloy-primitives = { workspace = true }
+arbitrary = { workspace = true }
+blst = { version = "0.3.3", optional = true }
+ethereum_hashing = { workspace = true }
+ethereum_serde_utils = { workspace = true }
 ethereum_ssz = { workspace = true }
-tree_hash = { workspace = true }
+fixed_bytes = { workspace = true }
+hex = { workspace = true }
 rand = { workspace = true }
+safe_arith = { workspace = true }
 serde = { workspace = true }
-ethereum_serde_utils = { workspace = true }
-hex = { workspace = true }
-ethereum_hashing = { workspace = true }
-arbitrary = { workspace = true }
+tree_hash = { workspace = true }
 zeroize = { workspace = true }
-blst = { version = "0.3.3", optional = true }
-safe_arith = { workspace = true }
-fixed_bytes = { workspace = true }
 
 [features]
 arbitrary = []
diff --git a/crypto/bls/src/macros.rs b/crypto/bls/src/macros.rs
index f3a7374ba7d..58b1ec7d6cc 100644
--- a/crypto/bls/src/macros.rs
+++ b/crypto/bls/src/macros.rs
@@ -20,7 +20,7 @@ macro_rules! impl_tree_hash {
         // but benchmarks have shown that to be at least 15% slower because of the
         // unnecessary copying and allocation (one Vec per byte)
         let values_per_chunk = tree_hash::BYTES_PER_CHUNK;
-        let minimum_chunk_count = ($byte_size + values_per_chunk - 1) / values_per_chunk;
+        let minimum_chunk_count = $byte_size.div_ceil(values_per_chunk);
         tree_hash::merkle_root(&self.serialize(), minimum_chunk_count)
     }
 };
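The two chunk-count expressions are equivalent for the values involved: `(n + d - 1) / d` and `n.div_ceil(d)` (stable since Rust 1.73) both compute ceiling division, and `div_ceil` additionally cannot overflow on the `n + d - 1` intermediate. A quick standalone check, assuming the SSZ chunk size of 32 bytes for `BYTES_PER_CHUNK`:

```rust
fn main() {
    let values_per_chunk: usize = 32; // assumed value of tree_hash::BYTES_PER_CHUNK
    for byte_size in [1usize, 31, 32, 33, 96] {
        let old = (byte_size + values_per_chunk - 1) / values_per_chunk;
        let new = byte_size.div_ceil(values_per_chunk);
        assert_eq!(old, new); // e.g. 33 bytes still round up to 2 chunks
    }
}
```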
diff --git a/crypto/eth2_key_derivation/Cargo.toml b/crypto/eth2_key_derivation/Cargo.toml
index a0237ba7ede..a893a9360dc 100644
--- a/crypto/eth2_key_derivation/Cargo.toml
+++ b/crypto/eth2_key_derivation/Cargo.toml
@@ -3,15 +3,14 @@ name = "eth2_key_derivation"
 version = "0.1.0"
 authors = ["Paul Hauner "]
 edition = { workspace = true }
-
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-sha2 = { workspace = true }
-zeroize = { workspace = true }
+bls = { workspace = true }
 num-bigint-dig = { version = "0.8.4", features = ["zeroize"] }
 ring = { workspace = true }
-bls = { workspace = true }
+sha2 = { workspace = true }
+zeroize = { workspace = true }
 
 [dev-dependencies]
 hex = { workspace = true }
diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml
index bb6222807bd..61d2722efbd 100644
--- a/crypto/eth2_keystore/Cargo.toml
+++ b/crypto/eth2_keystore/Cargo.toml
@@ -3,25 +3,24 @@ name = "eth2_keystore"
 version = "0.1.0"
 authors = ["Pawan Dhananjay "]
 edition = { workspace = true }
-
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-rand = { workspace = true }
+aes = { version = "0.7", features = ["ctr"] }
+bls = { workspace = true }
+eth2_key_derivation = { workspace = true }
+hex = { workspace = true }
 hmac = "0.11.0"
 pbkdf2 = { version = "0.8.0", default-features = false }
+rand = { workspace = true }
 scrypt = { version = "0.7.0", default-features = false }
-sha2 = { workspace = true }
-uuid = { workspace = true }
-zeroize = { workspace = true }
 serde = { workspace = true }
-serde_repr = { workspace = true }
-hex = { workspace = true }
-bls = { workspace = true }
 serde_json = { workspace = true }
-eth2_key_derivation = { workspace = true }
+serde_repr = { workspace = true }
+sha2 = { workspace = true }
 unicode-normalization = "0.1.16"
-aes = { version = "0.7", features = ["ctr"] }
+uuid = { workspace = true }
+zeroize = { workspace = true }
 
 [dev-dependencies]
 tempfile = { workspace = true }
diff --git a/crypto/eth2_keystore/src/keystore.rs b/crypto/eth2_keystore/src/keystore.rs
index 304ea3ecd6f..16a979cf63a 100644
--- a/crypto/eth2_keystore/src/keystore.rs
+++ b/crypto/eth2_keystore/src/keystore.rs
@@ -26,7 +26,7 @@ use std::io::{Read, Write};
 use std::path::Path;
 use std::str;
 use unicode_normalization::UnicodeNormalization;
-use zeroize::Zeroize;
+use zeroize::Zeroizing;
 
 /// The byte-length of a BLS secret key.
 const SECRET_KEY_LEN: usize = 32;
@@ -60,45 +60,6 @@ pub const HASH_SIZE: usize = 32;
 /// The default iteration count, `c`, for PBKDF2.
 pub const DEFAULT_PBKDF2_C: u32 = 262_144;
 
-/// Provides a new-type wrapper around `String` that is zeroized on `Drop`.
-///
-/// Useful for ensuring that password memory is zeroed-out on drop.
-#[derive(Clone, PartialEq, Serialize, Deserialize, Zeroize)]
-#[zeroize(drop)]
-#[serde(transparent)]
-struct ZeroizeString(String);
-
-impl From<String> for ZeroizeString {
-    fn from(s: String) -> Self {
-        Self(s)
-    }
-}
-
-impl AsRef<[u8]> for ZeroizeString {
-    fn as_ref(&self) -> &[u8] {
-        self.0.as_bytes()
-    }
-}
-
-impl std::ops::Deref for ZeroizeString {
-    type Target = String;
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
-impl std::ops::DerefMut for ZeroizeString {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.0
-    }
-}
-
-impl FromIterator<char> for ZeroizeString {
-    fn from_iter<T: IntoIterator<Item = char>>(iter: T) -> Self {
-        ZeroizeString(String::from_iter(iter))
-    }
-}
-
 #[derive(Debug, PartialEq)]
 pub enum Error {
     InvalidSecretKeyLen { len: usize, expected: usize },
@@ -451,11 +412,12 @@ fn is_control_character(c: char) -> bool {
 /// Takes a slice of bytes and returns a NFKD normalized string representation.
 ///
 /// Returns an error if the bytes are not valid utf8.
-fn normalize(bytes: &[u8]) -> Result<String, Error> {
+fn normalize(bytes: &[u8]) -> Result<Zeroizing<String>, Error> {
     Ok(str::from_utf8(bytes)
         .map_err(|_| Error::InvalidPasswordBytes)?
         .nfkd()
-        .collect::<String>())
+        .collect::<String>()
+        .into())
 }
 
 /// Generates a checksum to indicate that the `derived_key` is associated with the
diff --git a/crypto/eth2_keystore/tests/eip2335_vectors.rs b/crypto/eth2_keystore/tests/eip2335_vectors.rs
index 3702a218163..e6852cc6081 100644
--- a/crypto/eth2_keystore/tests/eip2335_vectors.rs
+++ b/crypto/eth2_keystore/tests/eip2335_vectors.rs
@@ -58,7 +58,7 @@ fn eip2335_test_vector_scrypt() {
     }
     "#;
 
-    let keystore = decode_and_check_sk(&vector);
+    let keystore = decode_and_check_sk(vector);
     assert_eq!(
         *keystore.uuid(),
         Uuid::parse_str("1d85ae20-35c5-4611-98e8-aa14a633906f").unwrap(),
@@ -102,7 +102,7 @@ fn eip2335_test_vector_pbkdf() {
     }
     "#;
 
-    let keystore = decode_and_check_sk(&vector);
+    let keystore = decode_and_check_sk(vector);
     assert_eq!(
         *keystore.uuid(),
         Uuid::parse_str("64625def-3331-4eea-ab6f-782f3ed16a83").unwrap(),
diff --git a/crypto/eth2_keystore/tests/tests.rs b/crypto/eth2_keystore/tests/tests.rs
index 0df884b8a27..20bf9f1653d 100644
--- a/crypto/eth2_keystore/tests/tests.rs
+++ b/crypto/eth2_keystore/tests/tests.rs
@@ -54,25 +54,17 @@ fn file() {
     let dir = tempdir().unwrap();
     let path = dir.path().join("keystore.json");
 
-    let get_file = || {
-        File::options()
-            .write(true)
-            .read(true)
-            .create(true)
-            .open(path.clone())
-            .expect("should create file")
-    };
-
     let keystore = KeystoreBuilder::new(&keypair, GOOD_PASSWORD, "".into())
         .unwrap()
         .build()
         .unwrap();
 
     keystore
-        .to_json_writer(&mut get_file())
+        .to_json_writer(File::create_new(&path).unwrap())
         .expect("should write to file");
 
-    let decoded = Keystore::from_json_reader(&mut get_file()).expect("should read from file");
+    let decoded =
+        Keystore::from_json_reader(File::open(&path).unwrap()).expect("should read from file");
 
     assert_eq!(
         decoded.decrypt_keypair(BAD_PASSWORD).err().unwrap(),
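With the hand-rolled `ZeroizeString` removed, password material is carried in `zeroize::Zeroizing<String>`, which derefs to `String` and wipes the backing buffer when dropped. A minimal sketch of the new `normalize` contract under that type, using only the `zeroize` and `unicode-normalization` crates already in the dependency tree (the error type is simplified for illustration; the real function returns the crate's `Error`):

```rust
use unicode_normalization::UnicodeNormalization;
use zeroize::Zeroizing;

// NFKD-normalize valid UTF-8 password bytes and return them wrapped in
// Zeroizing, so the backing String is zeroed when it goes out of scope.
fn normalize(bytes: &[u8]) -> Result<Zeroizing<String>, &'static str> {
    Ok(std::str::from_utf8(bytes)
        .map_err(|_| "invalid utf8 password bytes")?
        .nfkd()
        .collect::<String>()
        .into())
}

fn main() {
    let password = normalize("pässword".as_bytes()).unwrap();
    // Deref gives ordinary &str access while the value is alive.
    assert_eq!(password.chars().count(), 9); // NFKD splits 'ä' into two chars
} // `password` is zeroized here on drop
```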
diff --git a/crypto/eth2_wallet/Cargo.toml b/crypto/eth2_wallet/Cargo.toml
index f3af6aab592..5327bdc163b 100644
--- a/crypto/eth2_wallet/Cargo.toml
+++ b/crypto/eth2_wallet/Cargo.toml
@@ -3,18 +3,17 @@ name = "eth2_wallet"
 version = "0.1.0"
 authors = ["Paul Hauner "]
 edition = { workspace = true }
-
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
+eth2_key_derivation = { workspace = true }
+eth2_keystore = { workspace = true }
+rand = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
 serde_repr = { workspace = true }
-uuid = { workspace = true }
-rand = { workspace = true }
-eth2_keystore = { workspace = true }
-eth2_key_derivation = { workspace = true }
 tiny-bip39 = "1"
+uuid = { workspace = true }
 
 [dev-dependencies]
 hex = { workspace = true }
diff --git a/crypto/eth2_wallet/tests/tests.rs b/crypto/eth2_wallet/tests/tests.rs
index fe4565e0dbc..3dc073f764d 100644
--- a/crypto/eth2_wallet/tests/tests.rs
+++ b/crypto/eth2_wallet/tests/tests.rs
@@ -132,20 +132,11 @@ fn file_round_trip() {
     let dir = tempdir().unwrap();
     let path = dir.path().join("keystore.json");
 
-    let get_file = || {
-        File::options()
-            .write(true)
-            .read(true)
-            .create(true)
-            .open(path.clone())
-            .expect("should create file")
-    };
-
     wallet
-        .to_json_writer(&mut get_file())
+        .to_json_writer(File::create_new(&path).unwrap())
         .expect("should write to file");
 
-    let decoded = Wallet::from_json_reader(&mut get_file()).unwrap();
+    let decoded = Wallet::from_json_reader(File::open(&path).unwrap()).unwrap();
 
     assert_eq!(
         decoded.decrypt_seed(&[1, 2, 3]).err().unwrap(),
diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml
index ce55f83639b..bfe0f19cd0e 100644
--- a/crypto/kzg/Cargo.toml
+++ b/crypto/kzg/Cargo.toml
@@ -3,22 +3,21 @@ name = "kzg"
 version = "0.1.0"
 authors = ["Pawan Dhananjay "]
 edition = "2021"
-
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
 arbitrary = { workspace = true }
-ethereum_ssz = { workspace = true }
-ethereum_ssz_derive = { workspace = true }
-tree_hash = { workspace = true }
+c-kzg = { workspace = true }
 derivative = { workspace = true }
-serde = { workspace = true }
+ethereum_hashing = { workspace = true }
 ethereum_serde_utils = { workspace = true }
+ethereum_ssz = { workspace = true }
+ethereum_ssz_derive = { workspace = true }
 hex = { workspace = true }
-ethereum_hashing = { workspace = true }
-c-kzg = { workspace = true }
 rust_eth_kzg = { workspace = true }
+serde = { workspace = true }
 serde_json = { workspace = true }
+tree_hash = { workspace = true }
 
 [dev-dependencies]
 criterion = { workspace = true }
diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs
index f788be265a9..7aaa1d99190 100644
--- a/crypto/kzg/src/trusted_setup.rs
+++ b/crypto/kzg/src/trusted_setup.rs
@@ -99,7 +99,7 @@ impl<'de> Deserialize<'de> for G1Point {
     {
         struct G1PointVisitor;
 
-        impl<'de> Visitor<'de> for G1PointVisitor {
+        impl Visitor<'_> for G1PointVisitor {
             type Value = G1Point;
             fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                 formatter.write_str("A 48 byte hex encoded string")
@@ -135,7 +135,7 @@ impl<'de> Deserialize<'de> for G2Point {
     {
         struct G2PointVisitor;
 
-        impl<'de> Visitor<'de> for G2PointVisitor {
+        impl Visitor<'_> for G2PointVisitor {
             type Value = G2Point;
             fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                 formatter.write_str("A 96 byte hex encoded string")
diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml
index 96176f3fba5..a7a54b1416c 100644
--- a/database_manager/Cargo.toml
+++ b/database_manager/Cargo.toml
@@ -10,8 +10,8 @@ clap = { workspace = true }
 clap_utils = { workspace = true }
 environment = { workspace = true }
 hex = { workspace = true }
-store = { workspace = true }
-types = { workspace = true }
+serde = { workspace = true }
 slog = { workspace = true }
+store = { workspace = true }
 strum = { workspace = true }
-serde = { workspace = true }
+types = { workspace = true }
diff --git
a/lcli/Cargo.toml b/lcli/Cargo.toml index 77d122efb79..72be77a70bd 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "5.3.0" +version = "6.0.1" authors = ["Paul Hauner "] edition = { workspace = true } @@ -11,36 +11,36 @@ fake_crypto = ['bls/fake_crypto'] jemalloc = ["malloc_utils/jemalloc"] [dependencies] +account_utils = { workspace = true } +beacon_chain = { workspace = true } bls = { workspace = true } clap = { workspace = true } -log = { workspace = true } -sloggers = { workspace = true } -serde = { workspace = true } -serde_yaml = { workspace = true } -serde_json = { workspace = true } +clap_utils = { workspace = true } +deposit_contract = { workspace = true } env_logger = { workspace = true } -types = { workspace = true } -state_processing = { workspace = true } -ethereum_hashing = { workspace = true } -ethereum_ssz = { workspace = true } environment = { workspace = true } +eth2 = { workspace = true } eth2_network_config = { workspace = true } -deposit_contract = { workspace = true } -tree_hash = { workspace = true } -clap_utils = { workspace = true } +eth2_wallet = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum_ssz = { workspace = true } +execution_layer = { workspace = true } +hex = { workspace = true } lighthouse_network = { workspace = true } -validator_dir = { workspace = true } lighthouse_version = { workspace = true } -account_utils = { workspace = true } -eth2_wallet = { workspace = true } -eth2 = { workspace = true } -snap = { workspace = true } -beacon_chain = { workspace = true } -store = { workspace = true } +log = { workspace = true } malloc_utils = { workspace = true } rayon = { workspace = true } -execution_layer = { workspace = true } -hex = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +sloggers = { workspace = true } +snap = { workspace = true } +state_processing = { workspace = true } +store = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +validator_dir = { workspace = true } [package.metadata.cargo-udeps.ignore] normal = ["malloc_utils"] diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index dd1cb68f066..eda9a2ebf27 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "5.3.0" +version = "6.0.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false @@ -34,46 +34,46 @@ malloc_utils = { workspace = true, features = ["jemalloc"] } malloc_utils = { workspace = true } [dependencies] +account_manager = { "path" = "../account_manager" } +account_utils = { workspace = true } beacon_node = { workspace = true } -slog = { workspace = true } -types = { workspace = true } bls = { workspace = true } -ethereum_hashing = { workspace = true } -clap = { workspace = true } -environment = { workspace = true } boot_node = { path = "../boot_node" } -futures = { workspace = true } -validator_client = { workspace = true } -account_manager = { "path" = "../account_manager" } +clap = { workspace = true } clap_utils = { workspace = true } +database_manager = { path = "../database_manager" } +directory = { workspace = true } +environment = { workspace = true } eth2_network_config = { workspace = true } +ethereum_hashing = { workspace = true } +futures = { workspace = true } lighthouse_version = { workspace = true } -account_utils = { workspace = true } 
+logging = { workspace = true } +malloc_utils = { workspace = true } metrics = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } +slasher = { workspace = true } +slog = { workspace = true } task_executor = { workspace = true } -malloc_utils = { workspace = true } -directory = { workspace = true } +types = { workspace = true } unused_port = { workspace = true } -database_manager = { path = "../database_manager" } -slasher = { workspace = true } +validator_client = { workspace = true } validator_manager = { path = "../validator_manager" } -logging = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } -validator_dir = { workspace = true } -slashing_protection = { workspace = true } -lighthouse_network = { workspace = true } -sensitive_url = { workspace = true } +beacon_node_fallback = { workspace = true } +beacon_processor = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } -beacon_processor = { workspace = true } -beacon_node_fallback = { workspace = true } initialized_validators = { workspace = true } - +lighthouse_network = { workspace = true } +sensitive_url = { workspace = true } +slashing_protection = { workspace = true } +tempfile = { workspace = true } +validator_dir = { workspace = true } +zeroize = { workspace = true } [[test]] name = "lighthouse_tests" diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index f95751392c8..02b8e0b6552 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -6,19 +6,19 @@ edition = { workspace = true } [dependencies] async-channel = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } -sloggers = { workspace = true } -types = { workspace = true } eth2_config = { workspace = true } -task_executor = { workspace = true } eth2_network_config = { workspace = true } +futures = { workspace = true } logging = { workspace = true } -slog-term = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } slog-async = { workspace = true } -futures = { workspace = true } slog-json = "2.3.0" -serde = { workspace = true } +slog-term = { workspace = true } +sloggers = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } [target.'cfg(not(target_family = "unix"))'.dependencies] ctrlc = { version = "3.1.6", features = ["termination"] } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index e33e4cb9b81..43c5e1107ca 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -81,7 +81,7 @@ fn build_profile_name() -> String { std::env!("OUT_DIR") .split(std::path::MAIN_SEPARATOR) .nth_back(3) - .unwrap_or_else(|| "unknown") + .unwrap_or("unknown") .to_string() } diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 4d155937140..d53d042fa4e 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -15,7 +15,7 @@ use account_manager::{ use account_utils::{ eth2_keystore::KeystoreBuilder, validator_definitions::{SigningDefinition, ValidatorDefinition, ValidatorDefinitions}, - ZeroizeString, STDIN_INPUTS_FLAG, + STDIN_INPUTS_FLAG, }; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use std::env; @@ -27,6 +27,7 @@ use std::str::from_utf8; use tempfile::{tempdir, TempDir}; use types::{Keypair, PublicKey}; use validator_dir::ValidatorDir; +use zeroize::Zeroizing; /// 
Returns the `lighthouse account` command. fn account_cmd() -> Command { @@ -114,7 +115,7 @@ fn create_wallet<P: AsRef<Path>>( .arg(base_dir.as_ref().as_os_str()) .arg(CREATE_CMD) .arg(format!("--{}", NAME_FLAG)) - .arg(&name) + .arg(name) .arg(format!("--{}", PASSWORD_FLAG)) .arg(password.as_ref().as_os_str()) .arg(format!("--{}", MNEMONIC_FLAG)) @@ -272,16 +273,16 @@ impl TestValidator { .expect("stdout is not utf8") .to_string(); - if stdout == "" { + if stdout.is_empty() { return Ok(vec![]); } let pubkeys = stdout[..stdout.len() - 1] .split("\n") - .filter_map(|line| { + .map(|line| { let tab = line.find("\t").expect("line must have tab"); let (_, pubkey) = line.split_at(tab + 1); - Some(pubkey.to_string()) + pubkey.to_string() }) .collect::<Vec<_>>(); @@ -445,7 +446,9 @@ fn validator_import_launchpad() { } } - stdin.write(format!("{}\n", PASSWORD).as_bytes()).unwrap(); + stdin + .write_all(format!("{}\n", PASSWORD).as_bytes()) + .unwrap(); child.wait().unwrap(); @@ -498,12 +501,12 @@ fn validator_import_launchpad() { signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, - voting_keystore_password: Some(ZeroizeString::from(PASSWORD.to_string())), + voting_keystore_password: Some(Zeroizing::from(PASSWORD.to_string())), }, }; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); @@ -524,7 +527,7 @@ fn validator_import_launchpad() { expected_def.enabled = true; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); } @@ -581,7 +584,7 @@ fn validator_import_launchpad_no_password_then_add_password() { let mut child = validator_import_key_cmd(); wait_for_password_prompt(&mut child); let stdin = child.stdin.as_mut().unwrap(); - stdin.write("\n".as_bytes()).unwrap(); + stdin.write_all("\n".as_bytes()).unwrap(); child.wait().unwrap(); assert!( @@ -627,14 +630,16 @@ fn validator_import_launchpad_no_password_then_add_password() { }; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); let mut child = validator_import_key_cmd(); wait_for_password_prompt(&mut child); let stdin = child.stdin.as_mut().unwrap(); - stdin.write(format!("{}\n", PASSWORD).as_bytes()).unwrap(); + stdin + .write_all(format!("{}\n", PASSWORD).as_bytes()) + .unwrap(); child.wait().unwrap(); let expected_def = ValidatorDefinition { @@ -650,13 +655,13 @@ fn validator_import_launchpad_no_password_then_add_password() { signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path: dst_keystore_dir.join(KEYSTORE_NAME), voting_keystore_password_path: None, - voting_keystore_password: Some(ZeroizeString::from(PASSWORD.to_string())), + voting_keystore_password: Some(Zeroizing::from(PASSWORD.to_string())), }, }; let defs = ValidatorDefinitions::open(&dst_dir).unwrap(); assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); } @@ -753,12 +758,12 @@ fn validator_import_launchpad_password_file() { signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, - voting_keystore_password: Some(ZeroizeString::from(PASSWORD.to_string())), + voting_keystore_password: Some(Zeroizing::from(PASSWORD.to_string())), }, }; assert!( - defs.as_slice() ==
[expected_def], "validator defs file should be accurate" ); } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 80986653c16..88e05dfa12d 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -9,7 +9,6 @@ use beacon_node::beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_processor::BeaconProcessorConfig; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; -use lighthouse_version; use std::fs::File; use std::io::{Read, Write}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; @@ -128,7 +127,7 @@ fn allow_insecure_genesis_sync_default() { CommandLineTest::new() .run_with_zero_port_and_no_genesis_sync() .with_config(|config| { - assert_eq!(config.allow_insecure_genesis_sync, false); + assert!(!config.allow_insecure_genesis_sync); }); } @@ -146,7 +145,7 @@ fn allow_insecure_genesis_sync_enabled() { .flag("allow-insecure-genesis-sync", None) .run_with_zero_port_and_no_genesis_sync() .with_config(|config| { - assert_eq!(config.allow_insecure_genesis_sync, true); + assert!(config.allow_insecure_genesis_sync); }); } @@ -359,11 +358,11 @@ fn default_graffiti() { #[test] fn trusted_peers_flag() { - let peers = vec![PeerId::random(), PeerId::random()]; + let peers = [PeerId::random(), PeerId::random()]; CommandLineTest::new() .flag( "trusted-peers", - Some(format!("{},{}", peers[0].to_string(), peers[1].to_string()).as_str()), + Some(format!("{},{}", peers[0], peers[1]).as_str()), ) .run_with_zero_port() .with_config(|config| { @@ -383,7 +382,7 @@ fn genesis_backfill_flag() { CommandLineTest::new() .flag("genesis-backfill", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); + .with_config(|config| assert!(config.chain.genesis_backfill)); } /// The genesis backfill flag should be enabled if historic states flag is set. @@ -392,7 +391,7 @@ fn genesis_backfill_with_historic_flag() { CommandLineTest::new() .flag("reconstruct-historic-states", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); + .with_config(|config| assert!(config.chain.genesis_backfill)); } // Tests for Eth1 flags. @@ -448,7 +447,7 @@ fn eth1_cache_follow_distance_manual() { // Tests for Bellatrix flags. fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { use sensitive_url::SensitiveUrl; - let urls = vec!["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; + let urls = ["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; // we don't support redundancy for execution-endpoints // only the first provided endpoint is parsed. @@ -480,10 +479,10 @@ fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { .run_with_zero_port() .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); - assert_eq!(config.execution_endpoint.is_some(), true); + assert!(config.execution_endpoint.is_some()); assert_eq!( config.execution_endpoint.as_ref().unwrap().clone(), - SensitiveUrl::parse(&urls[0]).unwrap() + SensitiveUrl::parse(urls[0]).unwrap() ); // Only the first secret file should be used. assert_eq!( @@ -595,7 +594,7 @@ fn run_payload_builder_flag_test(flag: &str, builders: &str) { let config = config.execution_layer.as_ref().unwrap(); // Only first provided endpoint is parsed as we don't support // redundancy. 
- assert_eq!(config.builder_url, all_builders.get(0).cloned()); + assert_eq!(config.builder_url, all_builders.first().cloned()); }) } fn run_payload_builder_flag_test_with_config( @@ -661,7 +660,7 @@ fn builder_fallback_flags() { Some("builder-fallback-disable-checks"), None, |config| { - assert_eq!(config.chain.builder_fallback_disable_checks, true); + assert!(config.chain.builder_fallback_disable_checks); }, ); } @@ -1657,19 +1656,19 @@ fn http_enable_beacon_processor() { CommandLineTest::new() .flag("http", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); + .with_config(|config| assert!(config.http_api.enable_beacon_processor)); CommandLineTest::new() .flag("http", None) .flag("http-enable-beacon-processor", Some("true")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); + .with_config(|config| assert!(config.http_api.enable_beacon_processor)); CommandLineTest::new() .flag("http", None) .flag("http-enable-beacon-processor", Some("false")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, false)); + .with_config(|config| assert!(!config.http_api.enable_beacon_processor)); } #[test] fn http_tls_flags() { @@ -2221,7 +2220,7 @@ fn slasher_broadcast_flag_false() { }); } -#[cfg(all(feature = "slasher-lmdb"))] +#[cfg(feature = "slasher-lmdb")] #[test] fn slasher_backend_override_to_default() { // Hard to test this flag because all but one backend is disabled by default and the backend @@ -2429,7 +2428,7 @@ fn logfile_no_restricted_perms_flag() { .flag("logfile-no-restricted-perms", None) .run_with_zero_port() .with_config(|config| { - assert!(config.logger_config.is_restricted == false); + assert!(!config.logger_config.is_restricted); }); } #[test] @@ -2454,7 +2453,7 @@ fn logfile_format_flag() { fn sync_eth1_chain_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, true)); + .with_config(|config| assert!(config.sync_eth1_chain)); } #[test] @@ -2467,7 +2466,7 @@ fn sync_eth1_chain_execution_endpoints_flag() { dir.path().join("jwt-file").as_os_str().to_str(), ) .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, true)); + .with_config(|config| assert!(config.sync_eth1_chain)); } #[test] @@ -2481,7 +2480,7 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { dir.path().join("jwt-file").as_os_str().to_str(), ) .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); + .with_config(|config| assert!(!config.sync_eth1_chain)); } #[test] @@ -2504,9 +2503,9 @@ fn light_client_server_default() { CommandLineTest::new() .run_with_zero_port() .with_config(|config| { - assert_eq!(config.network.enable_light_client_server, false); - assert_eq!(config.chain.enable_light_client_server, false); - assert_eq!(config.http_api.enable_light_client_server, false); + assert!(!config.network.enable_light_client_server); + assert!(!config.chain.enable_light_client_server); + assert!(!config.http_api.enable_light_client_server); }); } @@ -2516,8 +2515,8 @@ fn light_client_server_enabled() { .flag("light-client-server", None) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.network.enable_light_client_server, true); - assert_eq!(config.chain.enable_light_client_server, true); + assert!(config.network.enable_light_client_server); + assert!(config.chain.enable_light_client_server); }); } @@ -2528,7 +2527,7 @@ 
fn light_client_http_server_enabled() { .flag("light-client-server", None) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.http_api.enable_light_client_server, true); + assert!(config.http_api.enable_light_client_server); }); } diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 659dea468de..b243cd6001e 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -149,7 +149,7 @@ fn disable_packet_filter_flag() { .flag("disable-packet-filter", None) .run_with_ip() .with_config(|config| { - assert_eq!(config.disable_packet_filter, true); + assert!(config.disable_packet_filter); }); } @@ -159,7 +159,7 @@ fn enable_enr_auto_update_flag() { .flag("enable-enr-auto-update", None) .run_with_ip() .with_config(|config| { - assert_eq!(config.enable_enr_auto_update, true); + assert!(config.enable_enr_auto_update); }); } diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 34fe04cc452..1945399c86d 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -136,7 +136,7 @@ fn beacon_nodes_tls_certs_flag() { .flag( "beacon-nodes-tls-certs", Some( - vec![ + [ dir.path().join("certificate.crt").to_str().unwrap(), dir.path().join("certificate2.crt").to_str().unwrap(), ] @@ -205,7 +205,7 @@ fn graffiti_file_with_pk_flag() { let mut file = File::create(dir.path().join("graffiti.txt")).expect("Unable to create file"); let new_key = Keypair::random(); let pubkeybytes = PublicKeyBytes::from(new_key.pk); - let contents = format!("{}:nice-graffiti", pubkeybytes.to_string()); + let contents = format!("{}:nice-graffiti", pubkeybytes); file.write_all(contents.as_bytes()) .expect("Unable to write to file"); CommandLineTest::new() @@ -344,6 +344,34 @@ fn http_store_keystore_passwords_in_secrets_dir_present() { .with_config(|config| assert!(config.http_api.store_passwords_in_secrets_dir)); } +#[test] +fn http_token_path_flag_present() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("http", None) + .flag("http-token-path", dir.path().join("api-token.txt").to_str()) + .run() + .with_config(|config| { + assert_eq!( + config.http_api.http_token_path, + dir.path().join("api-token.txt") + ); + }); +} + +#[test] +fn http_token_path_default() { + CommandLineTest::new() + .flag("http", None) + .run() + .with_config(|config| { + assert_eq!( + config.http_api.http_token_path, + config.validator_dir.join("api-token.txt") + ); + }); +} + // Tests for Metrics flags. 
#[test] fn metrics_flag() { @@ -404,13 +432,13 @@ pub fn malloc_tuning_flag() { CommandLineTest::new() .flag("disable-malloc-tuning", None) .run() - .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, false)); + .with_config(|config| assert!(!config.http_metrics.allocator_metrics_enabled)); } #[test] pub fn malloc_tuning_default() { CommandLineTest::new() .run() - .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, true)); + .with_config(|config| assert!(config.http_metrics.allocator_metrics_enabled)); } #[test] fn doppelganger_protection_flag() { diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index 999f3c31415..04e3eafe6eb 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -136,7 +136,7 @@ pub fn validator_create_defaults() { count: 1, deposit_gwei: MainnetEthSpec::default_spec().max_effective_balance, mnemonic_path: None, - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), disable_deposits: false, specify_voting_keystore_password: false, eth1_withdrawal_address: None, @@ -201,7 +201,7 @@ pub fn validator_create_disable_deposits() { .flag("--disable-deposits", None) .flag("--builder-proposals", Some("false")) .assert_success(|config| { - assert_eq!(config.disable_deposits, true); + assert!(config.disable_deposits); assert_eq!(config.builder_proposals, Some(false)); }); } @@ -300,7 +300,7 @@ pub fn validator_move_defaults() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -350,7 +350,7 @@ pub fn validator_move_misc_flags_1() { .flag("--src-vc-token", Some("./1.json")) .flag("--dest-vc-url", Some("http://localhost:2")) .flag("--dest-vc-token", Some("./2.json")) - .flag("--validators", Some(&format!("{}", EXAMPLE_PUBKEY_0))) + .flag("--validators", Some(EXAMPLE_PUBKEY_0)) .flag("--builder-proposals", Some("false")) .flag("--prefer-builder-proposals", Some("false")) .assert_success(|config| { @@ -368,7 +368,7 @@ pub fn validator_move_misc_flags_1() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -382,7 +382,7 @@ pub fn validator_move_misc_flags_2() { .flag("--src-vc-token", Some("./1.json")) .flag("--dest-vc-url", Some("http://localhost:2")) .flag("--dest-vc-token", Some("./2.json")) - .flag("--validators", Some(&format!("{}", EXAMPLE_PUBKEY_0))) + .flag("--validators", Some(EXAMPLE_PUBKEY_0)) .flag("--builder-proposals", Some("false")) .flag("--builder-boost-factor", Some("100")) .assert_success(|config| { @@ -400,7 +400,7 @@ pub fn validator_move_misc_flags_2() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -428,7 +428,7 @@ pub fn validator_move_count() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index ca701eb7e91..159c89badbc 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -1,6 +1,6 @@ # Simple Local Testnet -These scripts 
allow for running a small local testnet with a default of 4 beacon nodes, 4 validator clients and 4 geth execution clients using Kurtosis. +These scripts allow for running a small local testnet with a default of 4 beacon nodes, 4 validator clients and 4 Geth execution clients using Kurtosis. This setup can be useful for testing and development. ## Installation @@ -9,7 +9,7 @@ This setup can be useful for testing and development. 1. Install [Kurtosis](https://docs.kurtosis.com/install/). Verify that Kurtosis has been successfully installed by running `kurtosis version` which should display the version. -1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `snap install yq`. +1. Install [`yq`](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `snap install yq`. ## Starting the testnet @@ -22,7 +22,7 @@ cd ./scripts/local_testnet It will build a Lighthouse docker image from the root of the directory and will take approximately 12 minutes to complete. Once built, the testnet will start automatically. You will see a list of services running and "Started!" at the end. You can also select your own Lighthouse docker image to use by specifying it in `network_params.yml` under the `cl_image` key. -Full configuration reference for kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). +Full configuration reference for Kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). To view all running services: @@ -36,7 +36,7 @@ To view the logs: kurtosis service logs local-testnet $SERVICE_NAME ``` -where `$SERVICE_NAME` is obtained by inspecting the running services above. For example, to view the logs of the first beacon node, validator client and geth: +where `$SERVICE_NAME` is obtained by inspecting the running services above.
For example, to view the logs of the first beacon node, validator client and Geth: ```bash kurtosis service logs local-testnet -f cl-1-lighthouse-geth diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index 441e2a63575..5be5c13dee9 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -71,7 +71,7 @@ if [[ "$BEHAVIOR" == "failure" ]]; then # This process should not last longer than 2 epochs vc_1_range_start=0 vc_1_range_end=$(($KEYS_PER_NODE - 1)) - vc_1_keys_artifact_id="1-lighthouse-geth-$vc_1_range_start-$vc_1_range_end-0" + vc_1_keys_artifact_id="1-lighthouse-geth-$vc_1_range_start-$vc_1_range_end" service_name=vc-1-doppelganger kurtosis service add \ @@ -107,7 +107,7 @@ if [[ "$BEHAVIOR" == "success" ]]; then vc_4_range_start=$(($KEYS_PER_NODE * 3)) vc_4_range_end=$(($KEYS_PER_NODE * 4 - 1)) - vc_4_keys_artifact_id="4-lighthouse-geth-$vc_4_range_start-$vc_4_range_end-0" + vc_4_keys_artifact_id="4-lighthouse-geth-$vc_4_range_start-$vc_4_range_end" service_name=vc-4 kurtosis service add \ diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 56a023df0bb..fcecc2fc233 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -17,31 +17,31 @@ byteorder = { workspace = true } derivative = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } -metrics = { workspace = true } filesystem = { workspace = true } +flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } +lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } +lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lru = { workspace = true } + +# MDBX is pinned at the last version with Windows and macOS support. +mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", rev = "e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a", optional = true } +metrics = { workspace = true } parking_lot = { workspace = true } rand = { workspace = true } + +redb = { version = "2.1.4", optional = true } safe_arith = { workspace = true } serde = { workspace = true } slog = { workspace = true } +ssz_types = { workspace = true } +strum = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } types = { workspace = true } -strum = { workspace = true } -ssz_types = { workspace = true } - -# MDBX is pinned at the last version with Windows and macOS support. 
-mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", rev = "e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a", optional = true } -lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } -lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } - -redb = { version = "2.1.4", optional = true } [dev-dependencies] +logging = { workspace = true } maplit = { workspace = true } rayon = { workspace = true } tempfile = { workspace = true } -logging = { workspace = true } diff --git a/slasher/src/database/interface.rs b/slasher/src/database/interface.rs index 46cf9a4a0c3..af72006caab 100644 --- a/slasher/src/database/interface.rs +++ b/slasher/src/database/interface.rs @@ -192,7 +192,7 @@ impl<'env> RwTransaction<'env> { } } -impl<'env> Cursor<'env> { +impl Cursor<'_> { /// Return the first key in the current database while advancing the cursor's position. pub fn first_key(&mut self) -> Result, Error> { match self { diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 6012283e111..d93f3a55788 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -12,28 +12,28 @@ portable = ["beacon_chain/portable"] [dependencies] alloy-primitives = { workspace = true } +beacon_chain = { workspace = true } bls = { workspace = true } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } derivative = { workspace = true } +eth2_network_config = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +execution_layer = { workspace = true } +fork_choice = { workspace = true } +fs2 = { workspace = true } hex = { workspace = true } kzg = { workspace = true } +logging = { workspace = true } rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } -eth2_network_config = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } -tree_hash_derive = { workspace = true } +snap = { workspace = true } state_processing = { workspace = true } swap_or_not_shuffle = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } types = { workspace = true } -snap = { workspace = true } -fs2 = { workspace = true } -beacon_chain = { workspace = true } -fork_choice = { workspace = true } -execution_layer = { workspace = true } -logging = { workspace = true } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 390711079f4..d5f4997bb7e 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-alpha.6 +TESTS_TAG := v1.5.0-alpha.8 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index dfd782a22b3..c1adf107704 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -86,7 +86,7 @@ type_name!(RewardsAndPenalties, "rewards_and_penalties"); type_name!(RegistryUpdates, "registry_updates"); type_name!(Slashings, "slashings"); type_name!(Eth1DataReset, "eth1_data_reset"); -type_name!(PendingBalanceDeposits, "pending_balance_deposits"); +type_name!(PendingBalanceDeposits, "pending_deposits"); 
type_name!(PendingConsolidations, "pending_consolidations"); type_name!(EffectiveBalanceUpdates, "effective_balance_updates"); type_name!(SlashingsReset, "slashings_reset"); @@ -193,7 +193,7 @@ impl<E: EthSpec> EpochTransition<E> for PendingBalanceDeposits { state, spec, SinglePassConfig { - pending_balance_deposits: true, + pending_deposits: true, ..SinglePassConfig::disable_all() }, ) @@ -363,7 +363,7 @@ impl<E: EthSpec, T: EpochTransition<E>> Case for EpochProcessing<E, T> { } if !fork_name.electra_enabled() - && (T::name() == "pending_consolidations" || T::name() == "pending_balance_deposits") + && (T::name() == "pending_consolidations" || T::name() == "pending_deposits") { return false; } } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 33ae132e8a2..427bcf5e9c5 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -809,10 +809,13 @@ impl Tester { if expected_should_override_fcu.validator_is_connected { el.update_proposer_preparation( next_slot_epoch, - &[ProposerPreparationData { - validator_index: dbg!(proposer_index) as u64, - fee_recipient: Default::default(), - }], + [( + &ProposerPreparationData { + validator_index: dbg!(proposer_index) as u64, + fee_recipient: Default::default(), + }, + &None, + )], ) .await; } else { @@ -871,7 +874,7 @@ pub struct ManuallyVerifiedAttestation<'a, T: BeaconChainTypes> { indexed_attestation: IndexedAttestation<T::EthSpec>, } -impl<'a, T: BeaconChainTypes> VerifiedAttestation<T> for ManuallyVerifiedAttestation<'a, T> { +impl<T: BeaconChainTypes> VerifiedAttestation<T> for ManuallyVerifiedAttestation<'_, T> { fn attestation(&self) -> AttestationRef<T::EthSpec> { self.attestation.to_ref() } diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index a9322e5dd5e..c50032a63de 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -134,7 +134,7 @@ type_name_generic!(LightClientUpdateElectra, "LightClientUpdate"); type_name_generic!(PendingAttestation); type_name!(PendingConsolidation); type_name!(PendingPartialWithdrawal); -type_name!(PendingBalanceDeposit); +type_name!(PendingDeposit); type_name!(ProposerSlashing); type_name_generic!(SignedAggregateAndProof); type_name_generic!(SignedAggregateAndProofBase, "SignedAggregateAndProof"); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 3f802d89447..292625a371a 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -243,8 +243,7 @@ mod ssz_static { use types::historical_summary::HistoricalSummary; use types::{ AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DepositRequest, - LightClientBootstrapAltair, PendingBalanceDeposit, PendingPartialWithdrawal, - WithdrawalRequest, *, + LightClientBootstrapAltair, PendingDeposit, PendingPartialWithdrawal, WithdrawalRequest, *, }; ssz_static_test!(attestation_data, AttestationData); @@ -661,8 +660,8 @@ mod ssz_static { #[test] fn pending_balance_deposit() { - SszStaticHandler::<PendingBalanceDeposit, MinimalEthSpec>::electra_and_later().run(); - SszStaticHandler::<PendingBalanceDeposit, MainnetEthSpec>::electra_and_later().run(); + SszStaticHandler::<PendingDeposit, MinimalEthSpec>::electra_and_later().run(); + SszStaticHandler::<PendingDeposit, MainnetEthSpec>::electra_and_later().run(); } #[test] diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index c76ef91183b..9b0ac5ec9b3 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -5,12 +5,12 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -tokio = { workspace = true } +deposit_contract = { workspace = true }
+ethers-contract = "1.0.2" ethers-core = { workspace = true } ethers-providers = { workspace = true } -ethers-contract = "1.0.2" -types = { workspace = true } +hex = { workspace = true } serde_json = { workspace = true } -deposit_contract = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } unused_port = { workspace = true } -hex = { workspace = true } diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 015a632ff40..3cba908261a 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -1,6 +1,6 @@ //! Provides utilities for deploying and manipulating the eth2 deposit contract on the eth1 chain. //! -//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil) to simulate +//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil) to simulate //! the deposit contract for testing beacon node eth1 integration. //! //! Not tested to work with actual clients (e.g., geth). It should work fine, however there may be diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 159561d5dd8..28ff944799c 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -5,22 +5,22 @@ edition = { workspace = true } [dependencies] async-channel = { workspace = true } -tempfile = { workspace = true } +deposit_contract = { workspace = true } +ethers-core = { workspace = true } +ethers-providers = { workspace = true } +execution_layer = { workspace = true } +fork_choice = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +logging = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } serde_json = { workspace = true } task_executor = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true } -futures = { workspace = true } -execution_layer = { workspace = true } -sensitive_url = { workspace = true } types = { workspace = true } unused_port = { workspace = true } -ethers-providers = { workspace = true } -ethers-core = { workspace = true } -deposit_contract = { workspace = true } -reqwest = { workspace = true } -hex = { workspace = true } -fork_choice = { workspace = true } -logging = { workspace = true } [features] portable = ["types/portable"] diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 0289fd4206b..f6645093049 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -3,9 +3,10 @@ use crate::execution_engine::{ }; use crate::transactions::transactions; use ethers_providers::Middleware; +use execution_layer::test_utils::DEFAULT_GAS_LIMIT; use execution_layer::{ BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, - PayloadStatus, + PayloadParameters, PayloadStatus, }; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{header::CONTENT_TYPE, Client}; @@ -251,6 +252,7 @@ impl TestRig { */ let parent_hash = terminal_pow_block_hash; + let parent_gas_limit = DEFAULT_GAS_LIMIT; let timestamp = timestamp_now(); let prev_randao = Hash256::zero(); let head_root = Hash256::zero(); @@ -324,15 +326,22 @@ impl TestRig { Some(vec![]), None, ); + + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + 
payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: TEST_FORK, + }; + let block_proposal_content_type = self .ee_a .execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - TEST_FORK, &self.spec, None, BlockProductionVersion::FullV2, @@ -476,15 +485,22 @@ impl TestRig { Some(vec![]), None, ); + + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: TEST_FORK, + }; + let block_proposal_content_type = self .ee_a .execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - TEST_FORK, &self.spec, None, BlockProductionVersion::FullV2, diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index 97e73b8a2f3..0d9db528da4 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -5,14 +5,14 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -environment = { workspace = true } beacon_node = { workspace = true } -types = { workspace = true } -tempfile = { workspace = true } -eth2 = { workspace = true } -validator_client = { workspace = true } beacon_node_fallback = { workspace = true } -validator_dir = { workspace = true, features = ["insecure_keys"] } -sensitive_url = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } execution_layer = { workspace = true } +sensitive_url = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true } +types = { workspace = true } +validator_client = { workspace = true } +validator_dir = { workspace = true, features = ["insecure_keys"] } diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 7772523284a..77645dba457 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -3,20 +3,19 @@ name = "simulator" version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -node_test_rig = { path = "../node_test_rig" } +clap = { workspace = true } +env_logger = { workspace = true } +eth2_network_config = { workspace = true } execution_layer = { workspace = true } -types = { workspace = true } -parking_lot = { workspace = true } futures = { workspace = true } -tokio = { workspace = true } -env_logger = { workspace = true } -clap = { workspace = true } +kzg = { workspace = true } +node_test_rig = { path = "../node_test_rig" } +parking_lot = { workspace = true } rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } -eth2_network_config = { workspace = true } serde_json = { workspace = true } -kzg = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 142a657f07e..7c297153463 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -3,15 +3,14 @@ name = "state_transition_vectors" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] 
-state_processing = { workspace = true } -types = { workspace = true } -ethereum_ssz = { workspace = true } beacon_chain = { workspace = true } +ethereum_ssz = { workspace = true } +state_processing = { workspace = true } tokio = { workspace = true } +types = { workspace = true } [features] -portable = ["beacon_chain/portable"] \ No newline at end of file +portable = ["beacon_chain/portable"] diff --git a/testing/test-test_logger/Cargo.toml b/testing/test-test_logger/Cargo.toml index 63bb87c06e5..d2d705f714a 100644 --- a/testing/test-test_logger/Cargo.toml +++ b/testing/test-test_logger/Cargo.toml @@ -2,7 +2,6 @@ name = "test-test_logger" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index 0096d74f647..376aa13406e 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -2,31 +2,30 @@ name = "web3signer_tests" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] [dev-dependencies] +account_utils = { workspace = true } async-channel = { workspace = true } +environment = { workspace = true } eth2_keystore = { workspace = true } -types = { workspace = true } -tempfile = { workspace = true } -tokio = { workspace = true } -reqwest = { workspace = true } -url = { workspace = true } -slot_clock = { workspace = true } +eth2_network_config = { workspace = true } futures = { workspace = true } -task_executor = { workspace = true } -environment = { workspace = true } -account_utils = { workspace = true } +initialized_validators = { workspace = true } +logging = { workspace = true } +parking_lot = { workspace = true } +reqwest = { workspace = true } serde = { workspace = true } -serde_yaml = { workspace = true } -eth2_network_config = { workspace = true } serde_json = { workspace = true } -zip = { workspace = true } -parking_lot = { workspace = true } -logging = { workspace = true } -initialized_validators = { workspace = true } +serde_yaml = { workspace = true } slashing_protection = { workspace = true } +slot_clock = { workspace = true } +task_executor = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +url = { workspace = true } validator_store = { workspace = true } +zip = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index a58dcb5fa08..e0dee9ceb4b 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -130,7 +130,11 @@ mod tests { } fn client_identity_path() -> PathBuf { - tls_dir().join("lighthouse").join("key.p12") + if cfg!(target_os = "macos") { + tls_dir().join("lighthouse").join("key_legacy.p12") + } else { + tls_dir().join("lighthouse").join("key.p12") + } } fn client_identity_password() -> String { @@ -169,6 +173,8 @@ mod tests { } impl Web3SignerRig { + // We need to hold that lock as we want to get the binary only once + #[allow(clippy::await_holding_lock)] pub async fn new(network: &str, listen_address: &str, listen_port: u16) -> Self { GET_WEB3SIGNER_BIN .get_or_init(|| async { @@ -206,7 +212,7 @@ mod tests { keystore_password_file: keystore_password_filename.to_string(), }; let key_config_file = - File::create(&keystore_dir.path().join("key-config.yaml")).unwrap(); + 
File::create(keystore_dir.path().join("key-config.yaml")).unwrap(); serde_yaml::to_writer(key_config_file, &key_config).unwrap(); let tls_keystore_file = tls_dir().join("web3signer").join("key.p12"); diff --git a/testing/web3signer_tests/tls/generate.sh b/testing/web3signer_tests/tls/generate.sh index f918e87cf82..3b14dbddba3 100755 --- a/testing/web3signer_tests/tls/generate.sh +++ b/testing/web3signer_tests/tls/generate.sh @@ -1,7 +1,20 @@ #!/bin/bash -openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && -openssl pkcs12 -export -aes256 -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && + +# The lighthouse/key_legacy.p12 file is generated specifically for macOS because the default `openssl pkcs12` encoding +# algorithm in OpenSSL v3 is not compatible with the PKCS algorithm used by the Apple Security Framework. The client +# side (using the reqwest crate) relies on the Apple Security Framework to parse PKCS files. +# We don't need to generate web3signer/key_legacy.p12 because the compatibility issue doesn't occur on the web3signer +# side. It seems that web3signer (Java) uses its own implementation to parse PKCS files. +# See https://github.com/sigp/lighthouse/issues/6442#issuecomment-2469252651 + +# We specify `-days 825` when generating the certificate files because Apple requires TLS server certificates to have a +# validity period of 825 days or fewer. +# See https://github.com/sigp/lighthouse/issues/6442#issuecomment-2474979183 + +openssl req -x509 -sha256 -nodes -days 825 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && +openssl pkcs12 -export -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && cp web3signer/cert.pem lighthouse/web3signer.pem && -openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && -openssl pkcs12 -export -aes256 -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl req -x509 -sha256 -nodes -days 825 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && +openssl pkcs12 -export -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl pkcs12 -export -legacy -out lighthouse/key_legacy.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && openssl x509 -noout -fingerprint -sha256 -inform pem -in lighthouse/cert.pem | cut -b 20-| sed "s/^/lighthouse /" > web3signer/known_clients.txt diff --git a/testing/web3signer_tests/tls/lighthouse/cert.pem b/testing/web3signer_tests/tls/lighthouse/cert.pem index 24b0a2e5c0e..4aaf66b747d 100644 --- a/testing/web3signer_tests/tls/lighthouse/cert.pem +++ b/testing/web3signer_tests/tls/lighthouse/cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUXZijYo8W4/9dAq58ocFEbZDxohwwDQYJKoZIhvcNAQEL +MIIFuDCCA6CgAwIBAgIUa3O7icWD4W7c5yRMjG/EX422ODUwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDApsaWdodGhvdXNlMCAXDTIzMDkyMDAyNTYzNloYDzIxMjMwODI3MDI1NjM2 
-WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 -MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC1 -R1M9NnRwUsqFvJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52 -aHSA2fs2KyeA61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDE -jf0ogUVM9TCEt6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAw -Oz1d8/fxYJvIpT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5Fe -V0fPth+e9XMAH7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI -0vps1zF9Bo8QewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWM -VcR//EtbOZGqzGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr -67Vyi9SWSM6rdRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91f -kpT6kjc6d2h4bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa -3zLeqd89dS7HNLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcf -TPFe8xuDYsi155veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABo1QwUjALBgNVHQ8E -BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQU6r7QHkcEsWhEZHpcMpGxwKXQL9swDQYJKoZIhvcNAQELBQADggIBACyO -8xzqotye1J6xhDQCQnQF3dXaPTqfT31Ypg8UeU25V9N+bZO04CJKlOblukuvkedE -x1RDeqG3A81D4JOgTGFmFVoEF4iTk3NBrsHuMzph6ImHTd3TD+5iG5a3GL0i9PAI -dHTT6z6t2wlayjmHotqQ+N4A4msx8IPBRULcCmId319gpSDHsvt2wYbLdh+d9E2h -vI0VleJpJ7eoy05842VTkFJebriSpi75yFphKUnyAKlONiMN3o6eg90wpWdI+1rQ -js5lfm+pxYw8H6eSf+rl30m+amrxUlooqrSCHNVSO2c4+W5m/r3JfOiRqVUTxaO8 -0f/xYXo6SdRxdvJV18LEzOHURvkbqBjLoEfHbCC2EApevWAeCdjhvCBPl1IJZtFP -sYDpYtHhw69JmZ7Nj75cQyRtJMQ5S4GsJ/haYXNZPgRL1XBo1ntuc8K1cLZ2MucQ -1170+2pi3IvwmST+/+7+2fyms1AwF7rj2dVxNfPIvOxi6E9lHmPVxvpbuOYOEhex -XqTum/MjI17Qf6eoipk81ppCFtO9s3qNe9SBSjzYEYnsytaMdZSSjsOhE/IyYPHI -SICMjWE13du03Z5xWwK9i3UiFq+hIPhBHFPGkNFMmkQtcyS9lj9R0tKUmWdFPNa8 -nuhxn5kLUMriv3zsdhMPUC4NwM5XsopdWcuSxfnt +VQQDDApsaWdodGhvdXNlMB4XDTI0MTExNjIyMTI0NloXDTI3MDIxOTIyMTI0Nlow +azELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0eTES +MBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYDVQQD +DApsaWdodGhvdXNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsAg4 +CkW51XFC0ZlcLXOzAHHD3e1y2tCkvQLCC5YG4QGVnXtva4puSGprs5H2r46TM+92 +7EXqNls+UWARLJE8+cw6Jz2Ibpjyv9TwdHUYqlRjSsAJ1E9kFKWnQuzWSPUilY22 +KfkxkEfauAvL5qXBAX9C31E9t/QWWgFtiGetwk+MuVoqLFCifw2iKfKrKod/t0Ua +ykxm3PUi1LIjZq3yZIg6beiVIGNQ/FWcNK3NeR6LP7ZDvSWl1vJAQ/6EBTcNTYKb +B3rEiHmme20Vpl6QQMvzlZ+e+ZaU0JsycvEfKrBACvPXX1Bi1GVFFstb5XQ4a/f4 +p7LUQ9rJwOkm5mRLgrSkNzq4Nk1lPOIam5QFpdW4GBfeIUL0Q4K9io/fYsxF1DXh +fxCW1N6E6+RKhVG2cEdtnAmQxg9d8vIEMvFtuVMFMYjQ+qkJ5V0Ye11V/9lMo4Vf +H2ialSTLTKxoEjmYfCHXKu7JCba04uGEv9gzaX7Zk+uK9gN1FIMvDT3UIHZTDwtr +cm2kjn3wsuRiK3P974pAVAome+60jmH9M0IsBxLXilCI6aIcYwvHkfoSNwXQr1AI +6rBBA4o8df0OFvMp2/r1Ll9nLDTT7AxtjHu7C2HU46Fy9U01+oRiqW+UCY9+daMD +tQJMTkjfPwOU6b9KUOPKpraDnPubwNU6CXs6ySMCAwEAAaNUMFIwCwYDVR0PBAQD +AgQwMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwHQYDVR0O +BBYEFKbpk6hZNzlzv/AdKtsl6x+dgBo+MA0GCSqGSIb3DQEBCwUAA4ICAQCmICqz +X5WOhwUm6LJJwMvKgFoVkav6ZcG/bEPiLe4waM2BubTpa1KPke8kMSmd/eLRxOiU +o1Z4Wi+bDw/ZGZHhnj/bJBZei9O+uRV4RbHCBh/LutRjY5zrublXMTtmjxCIjjHK +nQnoFFqKelyUGdaOw1ttooRT2FSDriZ6LKJ9vrTx0eCPBPA0EyaxuaxX3e/qYfE6 +sdrseEZSsouAmNCQ6jHnrQlzjeGAE6tlSTC3NVWbDlDbnX6cdRF07kV5PxnfcoyO +HGM3hdrIk5mhLpXrNKZp1nI4Ecd6UKiMCLgVxfexRKVJn00IR1URotRXZ2H9hQnh +xT5CnEBM+9dXoiwIvU+QYpnxo7mc47I6VkvoBI05rnS10bliwAk20yZuqc8iYC7R +r+ISRnhAcSb0otnKvxQQqzRH4Fi13g4mIoxbPJq+xTrNomKe/ywUe5q1Dt8QMhEg +7Sv8yg4ErKEvWIk5N0JOe1PaysobWXkv5n+xH9eJneyuBHGdi8qXe+2JLkK7ZfKB +uuLZyQcbUxb0/FSOhvtYu+2hPUb7nCOFvheAafHJu1P0pOkP8NNpM9X+tNw8Orum +VVFO8rvOh4+pH8sXRZ4tUQ33mbQS96ZSuiMJYCQf6EDkqmtRkOHCAvKkEtRLm2yV 
+4IRAZKHZaeKYr1UXwaqzpwES+8ZZLjURkvqvnQ== -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.key b/testing/web3signer_tests/tls/lighthouse/key.key index d00b6c21229..2b510c6b6db 100644 --- a/testing/web3signer_tests/tls/lighthouse/key.key +++ b/testing/web3signer_tests/tls/lighthouse/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC1R1M9NnRwUsqF -vJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52aHSA2fs2KyeA -61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDEjf0ogUVM9TCE -t6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAwOz1d8/fxYJvI -pT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5FeV0fPth+e9XMA -H7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI0vps1zF9Bo8Q -ewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWMVcR//EtbOZGq -zGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr67Vyi9SWSM6r -dRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91fkpT6kjc6d2h4 -bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa3zLeqd89dS7H -NLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcfTPFe8xuDYsi1 -55veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABAoICAEP5a1KMPUwzF0Lfr1Jm1JUk -pLb26C2rkf3B56XIFZgddeJwHHMEkQ9Z6JYM5Bd0KJ6Y23rHgiXVN7plRvOiznMs -MAbgblroC8GbAUZ0eCJr5nxyOXQdS1jHufbA21x7FGbvsSqDkrdhR2C0uPLMyMvp -VHP7dey1mEyCkHrP+KFRU5kVxOG1WnBMqdY1Ws/uuMBdLk0xItttdOzfXhH4dHQD -wc5aAJrtusyNDFLC25Og49yIgpPMWe+gAYCm5jFz9PgRtVlDOwcxlX5J5+GSm7+U -XM1bPSmU1TSEH233JbQcqo4HkynB71ftbVUtMhEFhLBYoFO4u5Ncpr+wys0xJY4f -3aJRV5+gtlmAmsKN66GoMA10KNlLp2z7XMlx1EXegOHthcKfgf5D6LKRz8qZhknm -FFgAOg9Bak1mt1DighhPUJ0vLYU6K+u0ZXwysYygOkBJ/yj63ApuPCSTQb7U0JlL -JMgesy1om3rVdN0Oc7hNaxq7VwswkzUTUKS2ZvGozF3MmdPHNm5weJTb3NsWv8Qo -HiK1I88tY9oZ5r91SC82hMErmG4ElXFLxic1B29h3fsIe/l+WjmZRXixD9ugV0gj -CvNa8QD9K3hljlNrR6eSXeO2QOyxAEUr2N1MBlxrnAWZCzXKiTvTx1aKDYhJT0DY -zae/etTLHVjzgdH6GS33AoIBAQDaaWYHa9wkJIJPX4siVCatwWKGTjVfDb5Q9upf -twkxCf58pmbzUOXW3dbaz6S0npR0V6Wqh3S8HW7xaHgDZDMLJ1WxLJrgqDKU3Pqc -k7xnA/krWqoRVSOOGkPnSrnZo6AVc6FR+iwJjfuUu0rFDwiyuqvuXpwNsVwvAOoL -xIbaEbGUHiFsZamm2YkoxrEjXGFkZxQX9+n9f+IAiMxMQc0wezRREc8e61/mTovJ -QJ7ZDd7zLUR7Yeqciy59NOsD57cGtnp1K28I2eKLA4taghgd5bJjPkUaHg9j5Xf6 -nsxU2QCp9kpwXvtMxN7pERKWFsnmu8tfJOiUWCpp8SLbIl6nAoIBAQDUefKKjRLa -6quNW0rOGn2kx0K6sG7T45OhwvWXVjnPAjX3/2mAMALT1wc3t0iKDvpIEfMadW2S -O8x2FwyifdJXmkz943EZ/J5Tq1H0wr4NeClX4UlPIAx3CdFlCphqH6QfKtrpQ+Hf -+e8XzjVvdg8Y/RcbWgPgBtOh2oKT5QHDh13/994nH7GhVM7PjLUVvZVmNWaC77zr -bXcvJFF/81PAPWC2JoV6TL/CXvda2tG2clxbSfykfUBPBpeyEijMoxC4UMuCHhbp -NpLfKJQp9XNqbBG2K4jgLQ8Ipk6Vtia/hktLgORf/pbQ4PxEv7OP5e1AOreDg/CW -RnQtBb+/8czbAoIBABfDA8Cm8WpVNoAgKujvMs4QjgGCnLfcrOnuEw2awjs9lRxG -lki+cmLv+6IOmSK1Zf1KU9G7ru2QXjORZA0qZ4s9GkuOSMNMSUR8zh8ey46Bligr -UvlTw+x/2wdcz99nt9DdpZ1flE7tzYMe5UGPIykeufnS/TNYKmlKtivVk75B0ooE -xSof3Vczr4JqK3dnY4ki1cLNy/0yXookV+Wr+wDdRpHTWC9K+EH8JaUdjKqcobbf -I+Ywfu/NDJ++lBr2qKjoTWZV9VyHJ+hr2Etef/Uwujml2qq+vnnlyynPAPfyK+pR -y0NycfCmMoI0w0rk685YfAW75DnPZb3k6B/jG10CggEBAMxf2DoI5EAKRaUcUOHa -fUxIFhl4p8HMPy7zVkORPt2tZLf8xz/z7mRRirG+7FlPetJj4ZBrr09fkZVtKkwJ -9o8o7jGv2hSC9s/IFHb38tMF586N9nPTgenmWbF09ZHuiXEpSZPiJZvIzn/5a1Ch -IHiKyPUYKm4MYvhmM/+J4Z5v0KzrgJXlWHi0GJFu6KfWyaOcbdQ4QWG6009XAcWv -Cbn5z9KlTvKKbFDMA+UyYVG6wrdUfVzC1V6uGq+/49qiZuzDWlz4EFWWlsNsRsft -Pmz5Mjglu+zVqoZJYYGDydWjmT0w53qmae7U2hJOyqr5ILINSIOKH5qMfiboRr6c -GM0CggEAJTQD/jWjHDIZFRO4SmurNLoyY7bSXJsYAhl77j9Cw/G4vcE+erZYAhp3 -LYu2nrnA8498T9F3H1oKWnK7u4YXO8ViyQd73ql7iKrMjE98CjfGcTPCXwOcPAts -ZpM8ykgFTsJpXEFvIR5cyZ6XFSw2m/Z7CRDpmwQ8es4LpNnYA7V5Yu/zDE4h2/2T -NmftCiZvkxwgj6VyKumOxXBnGK6lB+b6YMTltRrgD/35zmJoKRdqyLb1szPJtQuh 
-HjRTa/BVPgA66xYFWhifRUiYKpc0bARTYofHeoDgu6yPzcHMuM70NQQGF+WWJySg -vc3Za4ClKSLmb3ZA9giTswYMev+3BQ== +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCwCDgKRbnVcULR +mVwtc7MAccPd7XLa0KS9AsILlgbhAZWde29rim5IamuzkfavjpMz73bsReo2Wz5R +YBEskTz5zDonPYhumPK/1PB0dRiqVGNKwAnUT2QUpadC7NZI9SKVjbYp+TGQR9q4 +C8vmpcEBf0LfUT239BZaAW2IZ63CT4y5WiosUKJ/DaIp8qsqh3+3RRrKTGbc9SLU +siNmrfJkiDpt6JUgY1D8VZw0rc15Hos/tkO9JaXW8kBD/oQFNw1NgpsHesSIeaZ7 +bRWmXpBAy/OVn575lpTQmzJy8R8qsEAK89dfUGLUZUUWy1vldDhr9/instRD2snA +6SbmZEuCtKQ3Org2TWU84hqblAWl1bgYF94hQvRDgr2Kj99izEXUNeF/EJbU3oTr +5EqFUbZwR22cCZDGD13y8gQy8W25UwUxiND6qQnlXRh7XVX/2UyjhV8faJqVJMtM +rGgSOZh8Idcq7skJtrTi4YS/2DNpftmT64r2A3UUgy8NPdQgdlMPC2tybaSOffCy +5GIrc/3vikBUCiZ77rSOYf0zQiwHEteKUIjpohxjC8eR+hI3BdCvUAjqsEEDijx1 +/Q4W8ynb+vUuX2csNNPsDG2Me7sLYdTjoXL1TTX6hGKpb5QJj351owO1AkxOSN8/ +A5Tpv0pQ48qmtoOc+5vA1ToJezrJIwIDAQABAoICAAav4teBDpSTjBZD3Slc28/u +6NUYnORZe+iYnwZ4DIrZPij29D40ym7pAm5jFrWHyDYqddOqVEHJKMGuniuZpaQk +cSqy2IJbDRDi5fK5zNYSBQBlJMc/IzryXNUOA8kbU6HN+fDEpqPBSjqNOCtRRwoa +uE+dDNspsPx6UWh9IWMTfCUOZ8u6XguCWRN+3g6F8M2yS/I9AZG81898qBueczbR +qTNdQoAyEnS2sj7ODqArQniJIMmh3he5D15SrNefeVt+1D5uGEkwiQ9NqL58ZfGp +zcPa7HWB/H7Wmac3W0rwpxfDa5fgIq3Id93Sm9fh/yka1Z28c8cGgknxxKiIs6Jg +F7CKZIBJ3XxjcgytB223El/R8faHLpMJSPadDZ7uuU3yD/Qvp/JhRrdgkpE5bbzC +rWL92eVL86cbI/Hamup7VZMMfQpvjJg7FXPUr6ACKBetNkvXH0rqAkxHR8ZgfTeM +EwrpSWS0aktxxeMjzPq4DUaKKVGiN2KMDhbHEd5h2ovWMzyr14isohW81Z8w5R68 +F+2jq3IlVTLe06vmTRXAhOpwecj8UpraZjM1qyFpBd/lAolTjjMxzKJ2DcHlWI8Q +7e9LMvt1fj3bbzJVubdrITjdeom5CnDrmDGcErX9xzom8m3auYLszUENp/sfIHru +0DP+LKb2W4BOmXKs3VABAoIBAQDm4HNpOA7X7Jw7oowS4MoZOeeTjzcldT2AP9O7 +jFf2I2t5Ig0mIIrIrEJCL1X+A3i3RblV7lhU3Dpag8dhZUrXhydgnXKEMH/zz3gx +daCY1NO1fxAx5Y4J8VlCMIA7FpZI6sgRPjLBOFdkD34HcKHsUu/r3KQ1A1xZGLOU +o1kxF2WyORGBwn83kWzhzK9RIwFIdx67m7ZLzwoD6nQul4A6qq1EE+QI5x4UYpBx +ZvQsWUtj0EujIKJFszJczivwGQ86Aj0MB7EaHg+bWtYET1kUmDmc/72sksQJVcsK +wYtkv/MsznAvuWfHVjYJo47+Qs1zpuDKEUC1cu768LtlKpljAoIBAQDDL/T2KilF +qK8IW2u7nyWY8ksN/xJOVC79ozx2cGlR/zbeht051NiaLP8YMwVKl618Bw5L+aHG +a1xA0AeuTvuo5TK/ObrWzMAY6A35gXPMd8msN6SJzIKHZSZrcg2GXTSFkn7iCRJp +vl58VX4FubfrNIXy3NGbgF2muz3Rwvk7bj5Ur3NxX574RLSuftw01rDt2fnfYGKD +NfLXzoR3rJ/E+wmS7sjBJbltvmySDZOyjDDJwAgMrn45Xbh9rVT5w62BbAJ78OTY +O3CBf9t40FmeSBlelqwSY6tUmf02+B8FhMTJzxlaCup2qIPn5z0RHIZ43bnqZ/X1 +nkNSs8ko0f1BAoIBABCw9WcL+Ha/0mO1Uq8itTmxp/5RAkmg+jtFYgdTFCDlWqW9 +QnoZLC9p1Lh4N51PnvCRB98ghh5MdaOJl2aBLjH6wWwItfi8kOONgkEBIgUqjcu3 +TfJtiCFL44oXe43KCj9nSeOFPaIecqL3Q8NB71LohBPnNa/neEuwr3r1fENCT8Xc +vllFOHFKADcq1xnkj/kvM3eYwEsmwrCZyKB9r3WOVUxwq7HBE7mhjpPEP67dHcgv +jOhUOacUV3XCKgcHqMQm2Ub/X1xmA/bVUFerbONCRhgFnS7WxXlvTGiQqYU1I11/ +5zhsDQaqQunbe0ECj1vnGqVBLg5wKrrVoJalx8UCggEAE8438wqQKYtWR2jPY7hg +XkanqwHo353XLtFzfykk5rcY4DebFxUr7WkHcXMr5EfDyMQGhVsNOU8Hi2QQg3Vs +P9UR8yludgFMtLpHQLwL/gFhq2HyBjGERSzUWy61hJ7Mh4k36sO05Jn2iHM8WGRh +7zHjLaOOeVLrLdHuEezQ0WD8Xid3dVeYj+SY2OPygEIQrfHiUvI6zMmanJ9N/b68 +b4ZxkEE+iarESAh8h81s4T8sbCxaJL9H+5Yw9D+0UauzXWCSV/U3o2FUpy9MG9Q4 +Y8E5Icn0J+GJLwp5ESzYKP0x4rBrCCH3bJbo240xOx1D39vP06M85/FpL2kizkuQ +gQKCAQBTmQd/wT+0hH2JoEA2yCtB3ylDSmarZr9yZ83j3hy7oJOL48FhzhMTGjNR +BqmwbV3/2Vky85FYXYwcOIHbwI8twKtI4OxOiXLnLkYZ4nNXLm65ckR1SfJhRyrM +8K/alI2l3AxY/RkZiUnnRGEAmjG8hwzka1Y6j9zT7KhFTTBlg0YR5TOD8bsd9/rX +yVR+XkgyxIshgcI6w7MnwdGt+aAGokGjZv+k09vTOnaFF4rcJgOCZ9t4ymnG3m+v +Ac4I2b8BA46WCxA6zeNn5IeKZL0Ibgv1NGbTW3vEzu2D9VNU3pqTm9Pq3QpMAp85 +UyUzHP+SV/CL1Otbg/HjN6JGIcgY -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.p12 b/testing/web3signer_tests/tls/lighthouse/key.p12 index 73468fa084b..f2ef6d20e27 100644 Binary files 
a/testing/web3signer_tests/tls/lighthouse/key.p12 and b/testing/web3signer_tests/tls/lighthouse/key.p12 differ diff --git a/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 b/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 new file mode 100644 index 00000000000..c3394fae9af Binary files /dev/null and b/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 differ diff --git a/testing/web3signer_tests/tls/lighthouse/web3signer.pem b/testing/web3signer_tests/tls/lighthouse/web3signer.pem index 6266cadf9bc..cae7603320d 100644 --- a/testing/web3signer_tests/tls/lighthouse/web3signer.pem +++ b/testing/web3signer_tests/tls/lighthouse/web3signer.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUIP5CN0WpH5om1bGaFn17Xc5ITJIwDQYJKoZIhvcNAQEL +MIIFuDCCA6CgAwIBAgIUTFaMFhei/518WFdGuVrjhuPl+RAwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMDAyNTYzNFoYDzIxMjMwODI3MDI1NjM0 -WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 -MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDS -cvshqu3747j4KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz -1FPflpj7pVWagWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaa -J3smHx2+VOhaMWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H -9C0UBIzXP7PnGrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6G -WLtJvk5ekfOVlRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pB -YKPThE5zW5KhIxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK8 -4y5L4BXxxohG0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8 -HtmSgkPEgfSVRxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQr -rrIUQAuXDcQX11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS -8kbmmuhxshsnZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN -/IC4PpwtYUO3/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABo1QwUjALBgNVHQ8E -BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQURs+EV23UZh/nDfRX412nxbn4dc8wDQYJKoZIhvcNAQELBQADggIBAHbg -/YOp/MAf+inmH9Docup+Uj/WVJ32I1mMXlpoTKQ6YExR0DAtf1bmP65EGyvJkFTu -taGM4FNdsn4JCJxDfCY5X5M5YcPmjj6n58UcFr418DiZFCRT5MAdOxyYZVszFIc3 -RiYiOocbM30tGiqFm23NwWlAmaSjIeozERk2RgdRDnDG08xEbskn2yvsvvgnZJ8d -0wxyMPHvno664bCNOJfljXYclHBk2coOFDWJ5q8DFCBLXlt+Z95ceaNLA9bMXfhv -gVnKWn+1hcD33pMGyH7POXt+neZxIracTUJDIm39Vx0sQmHdeDxGSe7+qI2dYKbJ -v6srSWw4Y5TEPpkdXg2+R8zM2hO7kxDqjWDiCTjeMWMEdmUW/hYN6ndhfJ5ZLKut -OM/2jAf+ZijB1j7ORgP7haa//31YaPS4efnurDItI5dlQkLY2gKjLfdsEe1NsVR5 -mUjE8HZoVGRFfGca+39TjTTp+mVN0bQhoi+qu11QwB39hl/3I1jVjmUb71MAmva2 -4wh5RblJukbFVcs5Cco1+fpd7j9pSrWD/wsf+l7XM57Mvt9his8pk9yZolLgKT0Z -yio8eJVOfTr8JHmVpbvE3KQ8cLk0qwjs/iSzsSA0wau9RXNmJVVGHWqEjo+i7dzX -JzEM/ha455mjGbrAqJLFMC0yMMjQX4YIvGJENqRS +VQQDDAp3ZWIzc2lnbmVyMB4XDTI0MTExNjIyMTI0NloXDTI3MDIxOTIyMTI0Nlow +azELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0eTES +MBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYDVQQD +DAp3ZWIzc2lnbmVyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAm8ys +aOKMuGgcCHfDlOM0ACovWvH81bU9qtiw9QbwMHmcBU40KJ6grfCCpOzQSa2wtUkU +hwM1YDOqGYndunyAFEMGuk/KAFkMsAuKxkQDmACaAoQGga8S6uF8ggq7FKa/puq2 +8CauA1ukoKkzu2zkAnSQILqespqNlsAdmltW/G274QRtQgP9q09pfZQXo8hmhwn0 +yuPnOc8PwC4gFMDssQsJum/FsttmeZhMOqL7hPIJ4hMyX+BQMW/XC7QiT6YXuvbd +045J+KVO6JGnLjMAL7ZKkizOC6GRjkvIylbcppCnxLJZkOM0cbdJ/zKowkl8U7un +J6oIIXb8SIVWapl+E8oaeRcx+7PuSqp4vUly7GkzK3YjMm6CMtdpCDt80wmq4ljt +ZSqURZ5XFKV+kd0b7KjRZAhhBxHcb/L/ScpKj95a7Nwqc/c42ABwLNCfyhUoNrbv 
+JOXjDNVbq9WWUrkBO3/2p1wDOYqip/8Bh8RfJMsqrpb2p4qe92cIhh6uvnftYEW/ +eMnz3T78/Z4QwSzr018ak84lTQWoQv1c5ikkf5a1eD1XJXIUfV8TcteiynATKtMo +vxb9p8C0StSPRgP/ep7g6JxwWOQBnQo8b2VIBK0fGuiU+8Nd03zmQx4n4Szv5mRE +4MAGGF2KKmd2/7FsHEZJ/vV2jGioXNMnUb65y00CAwEAAaNUMFIwCwYDVR0PBAQD +AgQwMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwHQYDVR0O +BBYEFJeXpQKlrKwdrz52DAe37aZ+1fSkMA0GCSqGSIb3DQEBCwUAA4ICAQAKsLqM +yYr3bpx+6SzVLxb+KmNvoXELyzZkaJJX0nBmxmL+u7WD7J8NT61QPte89I6MDPVF +6qJTwBn5JE5yfBpy+D106s9nMAFLyTP07K1SbgaWbxL4LdxB0uMORj5Bt643gJCH +pkhGIOOv2XNAnuPonV6dJmaaKZz4MD2c6bN6DcfXlR92VlMoLv231M1EbgJvOJmg +9SEzwAkcLHhtuTbwyZv8+UgjsKhlZKJygaXTJVMacSXHUKoszvvWCWNK2ITReuPE +qWUd/AveZs+H+S0UDwJj9yXo47+ZSiXGxLUzdH5AutnapDTNTAQ1DySqtrJDUbw6 +GHsAPWtWefsJXDEuctgT0U+PgtDvGhA4Vv78Xrlg655jAYrKqKAbY9E21vDPv97J +1oNzbzsNlP5sRhlypf6VOeIuHF/T1m2MNtCyx5o/yH9EDLMicS629kMb6eBAK/qJ +MRFqRc9AbBHzIdC3f/YmG06WXc3fViPIAHP4zLC+wOjy4btC3pLQttQtrsOyqIbo +7IRPJ2PveMKGGaMCB+raGMO8kD4giJr3iUhrJde8Ggn32Ngngh9SsJFbGUWCPDmk +20USiQ5GU9CQlQhwDmA0K7vyjhOALP0bseTISqKkWeZZqeLrn1Y9Kl7rl46aEnzi +zs2KFCBovSPHkorjrhbm9N3KNpkBZaoa7SPA7A== -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/cert.pem b/testing/web3signer_tests/tls/web3signer/cert.pem index 6266cadf9bc..cae7603320d 100644 --- a/testing/web3signer_tests/tls/web3signer/cert.pem +++ b/testing/web3signer_tests/tls/web3signer/cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUIP5CN0WpH5om1bGaFn17Xc5ITJIwDQYJKoZIhvcNAQEL +MIIFuDCCA6CgAwIBAgIUTFaMFhei/518WFdGuVrjhuPl+RAwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDAp3ZWIzc2lnbmVyMCAXDTIzMDkyMDAyNTYzNFoYDzIxMjMwODI3MDI1NjM0 -WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 -MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDS -cvshqu3747j4KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz -1FPflpj7pVWagWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaa -J3smHx2+VOhaMWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H -9C0UBIzXP7PnGrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6G -WLtJvk5ekfOVlRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pB -YKPThE5zW5KhIxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK8 -4y5L4BXxxohG0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8 -HtmSgkPEgfSVRxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQr -rrIUQAuXDcQX11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS -8kbmmuhxshsnZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN -/IC4PpwtYUO3/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABo1QwUjALBgNVHQ8E -BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQURs+EV23UZh/nDfRX412nxbn4dc8wDQYJKoZIhvcNAQELBQADggIBAHbg -/YOp/MAf+inmH9Docup+Uj/WVJ32I1mMXlpoTKQ6YExR0DAtf1bmP65EGyvJkFTu -taGM4FNdsn4JCJxDfCY5X5M5YcPmjj6n58UcFr418DiZFCRT5MAdOxyYZVszFIc3 -RiYiOocbM30tGiqFm23NwWlAmaSjIeozERk2RgdRDnDG08xEbskn2yvsvvgnZJ8d -0wxyMPHvno664bCNOJfljXYclHBk2coOFDWJ5q8DFCBLXlt+Z95ceaNLA9bMXfhv -gVnKWn+1hcD33pMGyH7POXt+neZxIracTUJDIm39Vx0sQmHdeDxGSe7+qI2dYKbJ -v6srSWw4Y5TEPpkdXg2+R8zM2hO7kxDqjWDiCTjeMWMEdmUW/hYN6ndhfJ5ZLKut -OM/2jAf+ZijB1j7ORgP7haa//31YaPS4efnurDItI5dlQkLY2gKjLfdsEe1NsVR5 -mUjE8HZoVGRFfGca+39TjTTp+mVN0bQhoi+qu11QwB39hl/3I1jVjmUb71MAmva2 -4wh5RblJukbFVcs5Cco1+fpd7j9pSrWD/wsf+l7XM57Mvt9his8pk9yZolLgKT0Z -yio8eJVOfTr8JHmVpbvE3KQ8cLk0qwjs/iSzsSA0wau9RXNmJVVGHWqEjo+i7dzX -JzEM/ha455mjGbrAqJLFMC0yMMjQX4YIvGJENqRS 
+VQQDDAp3ZWIzc2lnbmVyMB4XDTI0MTExNjIyMTI0NloXDTI3MDIxOTIyMTI0Nlow +azELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0eTES +MBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYDVQQD +DAp3ZWIzc2lnbmVyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAm8ys +aOKMuGgcCHfDlOM0ACovWvH81bU9qtiw9QbwMHmcBU40KJ6grfCCpOzQSa2wtUkU +hwM1YDOqGYndunyAFEMGuk/KAFkMsAuKxkQDmACaAoQGga8S6uF8ggq7FKa/puq2 +8CauA1ukoKkzu2zkAnSQILqespqNlsAdmltW/G274QRtQgP9q09pfZQXo8hmhwn0 +yuPnOc8PwC4gFMDssQsJum/FsttmeZhMOqL7hPIJ4hMyX+BQMW/XC7QiT6YXuvbd +045J+KVO6JGnLjMAL7ZKkizOC6GRjkvIylbcppCnxLJZkOM0cbdJ/zKowkl8U7un +J6oIIXb8SIVWapl+E8oaeRcx+7PuSqp4vUly7GkzK3YjMm6CMtdpCDt80wmq4ljt +ZSqURZ5XFKV+kd0b7KjRZAhhBxHcb/L/ScpKj95a7Nwqc/c42ABwLNCfyhUoNrbv +JOXjDNVbq9WWUrkBO3/2p1wDOYqip/8Bh8RfJMsqrpb2p4qe92cIhh6uvnftYEW/ +eMnz3T78/Z4QwSzr018ak84lTQWoQv1c5ikkf5a1eD1XJXIUfV8TcteiynATKtMo +vxb9p8C0StSPRgP/ep7g6JxwWOQBnQo8b2VIBK0fGuiU+8Nd03zmQx4n4Szv5mRE +4MAGGF2KKmd2/7FsHEZJ/vV2jGioXNMnUb65y00CAwEAAaNUMFIwCwYDVR0PBAQD +AgQwMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwHQYDVR0O +BBYEFJeXpQKlrKwdrz52DAe37aZ+1fSkMA0GCSqGSIb3DQEBCwUAA4ICAQAKsLqM +yYr3bpx+6SzVLxb+KmNvoXELyzZkaJJX0nBmxmL+u7WD7J8NT61QPte89I6MDPVF +6qJTwBn5JE5yfBpy+D106s9nMAFLyTP07K1SbgaWbxL4LdxB0uMORj5Bt643gJCH +pkhGIOOv2XNAnuPonV6dJmaaKZz4MD2c6bN6DcfXlR92VlMoLv231M1EbgJvOJmg +9SEzwAkcLHhtuTbwyZv8+UgjsKhlZKJygaXTJVMacSXHUKoszvvWCWNK2ITReuPE +qWUd/AveZs+H+S0UDwJj9yXo47+ZSiXGxLUzdH5AutnapDTNTAQ1DySqtrJDUbw6 +GHsAPWtWefsJXDEuctgT0U+PgtDvGhA4Vv78Xrlg655jAYrKqKAbY9E21vDPv97J +1oNzbzsNlP5sRhlypf6VOeIuHF/T1m2MNtCyx5o/yH9EDLMicS629kMb6eBAK/qJ +MRFqRc9AbBHzIdC3f/YmG06WXc3fViPIAHP4zLC+wOjy4btC3pLQttQtrsOyqIbo +7IRPJ2PveMKGGaMCB+raGMO8kD4giJr3iUhrJde8Ggn32Ngngh9SsJFbGUWCPDmk +20USiQ5GU9CQlQhwDmA0K7vyjhOALP0bseTISqKkWeZZqeLrn1Y9Kl7rl46aEnzi +zs2KFCBovSPHkorjrhbm9N3KNpkBZaoa7SPA7A== -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/key.key b/testing/web3signer_tests/tls/web3signer/key.key index d9697534064..97a36d1bb2f 100644 --- a/testing/web3signer_tests/tls/web3signer/key.key +++ b/testing/web3signer_tests/tls/web3signer/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDScvshqu3747j4 -KMaGyGW0CA2GAznogVyKqNt4lan/8mdYUI2PUeezaUOnmoyM9oWz1FPflpj7pVWa -gWlSOgZ9vOElqQhe+la4ZEdGmOpe44c1rBoeHK314Gbmr2EuCxaaJ3smHx2+VOha -MWDeebRHQqy/s5tf3Um7G2iXU2iexriz42I8d6efWGmaL2sTLQ6H9C0UBIzXP7Pn -GrMlef9eR+7pu/ai9MjD1M7CWpwvPhEjanA2InwKugiDXj+A5/6GWLtJvk5ekfOV -lRHPZQbKJc/SG9tbbH9dHLEezIbZ6a5Y0iTcIfoiBxUpX5KyK/pBYKPThE5zW5Kh -IxXcpqFIMaTW/nK33BlOJ0fPNtX/SWLyoBsTtxCo1XFFUjHCkXK84y5L4BXxxohG -0DAuO4BtQHE5hgyswGQX2t4RjDvzvSm4tN02m9HUh7gu/d2FbgX8HtmSgkPEgfSV -RxegmbA71qHqKS0/i5BbnQjLkeWiWKRWGJoHFfhGN1sY0jUGFvQrrrIUQAuXDcQX -11UzgwkX5/cowtlm8IB/RWggPfC4gfCL4QvNz4pMxuMUWjXUn0uS8kbmmuhxshsn -ZUL+l+nnpRSobZqHRvvqiFKg8q9GsBUTGu0fFbjDeVQyYF2UOWeN/IC4PpwtYUO3 -/gR0babEffgYOWwWbQQGSPcmG7Y4zwIDAQABAoICABRxePXJ+KOpznPE5Owo7BWe -BqTzC/K1xlCYm0v5IJzYEQlM4e4p4wZ+/kR6Hex/nM4IR+bbZpxjcOUObIsWpJTI -VAgS2y5RcTp+UJzfXpJogIpKiqBMNutAqPOrK8Hg797PtlsmAKoBmNn8xqU1+2Oa -FX/rKaJus6qKZ2bz16DnkFUL4foabDJte0IFbd2yAyGv1ZqGiqFKSJFK+wYeoMZU -LzWOEyUR/wK5ryVwJJCY8z9BKAoKNYnb4oHTFlDRDdztIlxv29sR9dtHsjA3EdQc -nOCTNi7eY6JJlucgBSWGrsS6vTvpImGggIIWt6sOh0Px6Fg0F7mFtsESex2GePow -50MwKFbbVo3TUYRYTggJj7ba4+yrl/dsAWJUX3F90xNj/6REF+2+Licb7kgCHQKw -TvdExiikOOFtuFRkl5fqyoM9Ph+sj7/db5Pd53D8vaMjR3Yw/JA5dKPZS5ZKHBs0 -qo7FxV8ZlOESMv2eF6y0kM4wLhUN8wnEWxpsFWtXDNjYIlQ6W5qrfwR1vlnIkrmb -bYQCJFtko6CKUEa8yb4OvLgyX6VSskeYEC5zdekivZWJN/OZZa/xIS2nupYqD4GT 
-Y3QcsEhfzDvVIwI7M+eBwS5qjgdwN2qEGrXva5KKesb2zdjNircKaUahTWJNYHjj -jHGOSY/vyGFH2HFZNYZpAoIBAQDyoMpeXBDQhAXbHpIm6p8KljqRMHU05UeRRWVR -d0RKXGYq/bUzoAhr8F2QE2+HC+2NnBGh6qR5QNO/6H6p8Du6aSXDaDNJxTErOOmY -pAkbOlcA7TjpDSrNUr4EfAXl6vUF7JB8jJHEXIqBkbGWOFYPzwLEwErQAlQN2u4e -u9HKG3Me+DP2IcrCgZ5iWvmjV4l+vXYyBEXoJqHOWEscWXHiz64c336oZqwqKe/x -s8Xy2sd6FRU/mp34wXT4kZ56/U4BV+DEN20fffBiTfMQxKmXhMykmD/O63dASCiA -seZrZK5mRND+aS95MqI6FMm0ToKj24RvvAWR8w50cuF7wl5zAoIBAQDeDC6ImN7K -mSLaMBaIhoZsJDdG0cJiFPRmwtepeoWt4qUWuc51LOFthhlkyGx/JbEzFMK6uYTu -hHHNOgk6ydrz1+HOzpSvN0Iz61j1hJd8Ve/0MyTBg912FPe2p3hR9dN4j5Ly+oes -QvNIr/ReW5HJhDcgXm/9oT68XyzrKM3t93XPoO4wDPSHPbRWE2dzLrNi1xg/ZyRz -ZLAtBsGPG5rVAeSEob0ytZH2H1pHfkRQ/1jSKxwb+QVMfjDd5FrEAMLA4E6J8HFz -RDHTmrveGrR1i5BJrce3VUOAuL7Y3iw6Sb+b1LyA8htxiYfBVdVfCeocDv64m0R5 -NJs6Milm9uk1AoIBAQCdQLForusG+kqBVjMLng0uY2faKjoM6n2UHhIo1tAgEfr1 -6jHDH/nVW5iIhNBICucQXRLgip/HJskXHKzbn6RWkUe0epijO3c+uEhOciKkzw8M -vrOf+LTBFtupNGjuN3ZPPJ/42XKwffoXOEKNRj4hSN5Wfvr+DkREJp0mtjymbVwT -unKTGBu+LRxmSuh5gYbP6iPtDu/wIvnEL12fJim2Azyp4gDJTKJRQZUOZqHpYPrg -mUGIU8IHM/uID3lT5VDldftrsTC8tHdUf4kGWTBB0ASCuVrB1cMYmqwFnUfmWv7d -scRy3+Gw/6w9ULPadPgfE2umr4o8qfe4aazS9YsZAoIBADZH+hQwcr5KQ0fdW5TS -dgf3rn+khYVepAR++yOWLRm9/yeYEo14hD82+fw2Nre6aiAXoibtdT6tp/hIiLsT -X3AexTe+LoDK3Gc+0Edsu2+MvpUO75xS9Q+JvqirNfGrS5/8USsO7Z3B3CFXykBK -2E/P/33tOCljgqegCKYQGo9i4Cz6pV+fuyNYhT5Jjg+NShMOjAHr3/BJm/vV2/l1 -ARuzU77MnyjHVEA7l+FET8URNxBhs4RvEsmJS77itQGXQgTOkMSNv94yvI+DEwwP -sS/PB13LmrgJou/TuevgHCW/o5Sfo9lN1kGiIkq0Be4uyUlErSZJ5qpOnufSHWbr -U0UCggEAC5WM3BXKo11Y+XphsYnpJesiB9C5HMvhnB5oCHH7ffIVqkXp2AiUnWy6 -HE+DwUWFEtRLYr4beTXn+TeunoQa7X5K1JXV41XENf5CsbQTIUnX2j7o2ilCEx9C -rDPtpUZPObqXHBiHSF67Il7GitCud+7YDAGqbJABlV3WF0MkPIfW/cxN3cb65FoI -AEV3OZiS6zvDR91++ovNV5QAmH1vljvipM7kKy5RsLFF8GYa0KNTNJ/EYojKmw00 -2OakG0pjjDcWjfdGI+i5gcHNUZwbgqx4NG/RY3YslJswBhGGlhEGuuUtpH47HTM2 -oJ/aHbXf6PdOO9MYiI/es/dfKK8ywA== +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCbzKxo4oy4aBwI +d8OU4zQAKi9a8fzVtT2q2LD1BvAweZwFTjQonqCt8IKk7NBJrbC1SRSHAzVgM6oZ +id26fIAUQwa6T8oAWQywC4rGRAOYAJoChAaBrxLq4XyCCrsUpr+m6rbwJq4DW6Sg +qTO7bOQCdJAgup6ymo2WwB2aW1b8bbvhBG1CA/2rT2l9lBejyGaHCfTK4+c5zw/A +LiAUwOyxCwm6b8Wy22Z5mEw6ovuE8gniEzJf4FAxb9cLtCJPphe69t3Tjkn4pU7o +kacuMwAvtkqSLM4LoZGOS8jKVtymkKfEslmQ4zRxt0n/MqjCSXxTu6cnqgghdvxI +hVZqmX4Tyhp5FzH7s+5Kqni9SXLsaTMrdiMyboIy12kIO3zTCariWO1lKpRFnlcU +pX6R3RvsqNFkCGEHEdxv8v9JykqP3lrs3Cpz9zjYAHAs0J/KFSg2tu8k5eMM1Vur +1ZZSuQE7f/anXAM5iqKn/wGHxF8kyyqulvanip73ZwiGHq6+d+1gRb94yfPdPvz9 +nhDBLOvTXxqTziVNBahC/VzmKSR/lrV4PVclchR9XxNy16LKcBMq0yi/Fv2nwLRK +1I9GA/96nuDonHBY5AGdCjxvZUgErR8a6JT7w13TfOZDHifhLO/mZETgwAYYXYoq +Z3b/sWwcRkn+9XaMaKhc0ydRvrnLTQIDAQABAoICAAkK8k+CJWXHUSfiqlJCylq0 +wsaLY2qUrZgezKKO0l5rPeAxn6W4k0iVPEKjUMboqGgqmODp5u1cV6P01GRFuU9e +a+O2IYTZ+6UKKSCvLkA3z2M+/iZadDAi102wSWx+Qy3Gfh4ROKoO5c7CE15uXbY7 +cBQeO3t6rKYnz3ANRiQPoHHZMRg9pWRjOb26seaP3qI5ieDuz8BotsLSSoq51I3F +ahKioyhpmJiNJjuEXdyOXyl6hhgk1l1liZHvKiCnr2/qx97xaXS075ALHYIjyVUm +RkzBV1I7K04a3Z7KN4i0E7C/I04ICfFhuznvMEwFOjdH8ytCW1zJ4a8PN3/8xGGo +DpgfxkpJtcTWG6KLIA7yR6o0k4qhiBHu7Tki2WuCUOYI+L7hcKx3IV3JUWLd2djd +ic2kETnu/ARl1WhDwq/GJSmCoMf1svJGPyMimOpd0xW39DrIB02CHymLe64GsXES +bk+Tma2Cwe8vRmSgB832h5x81u32S9POdWQMCRePaIS84Lay8ICrw47a00itTMhI +BzHqXEOqn4kiLlLeKiIHW8btDSQH/YAHE6Yavzn28lmvo2Ad0VGxhSOo4X2KKvNo +MUdLpoDdJvwz1Mt4XLGqtOzJe15I3lvHgc9D0Ea7O82FUvfm6t4KHML4o9QRZ3P6 +EQQlOxiLaF/V07bM2bfXAoIBAQDaORYMakf3mybMC1JypVGIW/bjeOvPJyuKqjLR +emdDyXmZ8m1opJLMT/wBTLCFuVFIgjKH+UW8S8utymPIh70RQ2fVh4HFhays9qgu +FQQGq1QVeoCcKeF8yZjvCDSDtT9y3hHPDpFAv/q7RvrzpySQgptsEeZ8XrScqFqO 
+5ukZQg7Q9eEQY6djbyLDPoT3bRBTmtMJc2PQM/w7IBnmmpUMh+qpg7/0WDeIP4ur +hkMB5Var0X3WPJnomvfbrq0nc9o7FZ1n5Lb7vwIYDIMQY+1G6lMPzf2QDYJxj/ns +1Mw56AO8vy3YpDQktUuYMXrOSO7ieyeiE+PP/pIB2eJB3OBPAoIBAQC2xS/eb09S +v7Q235rc6M/giqErI+s5M8iQkAh6pr6o/gtaUyFWN9neL9GVAcgWcr6X42NnBP3b +XJ116WRRlS4d6bMvjty3JAXWgOgcnFGwZMzM3jKwk0bSJhQ0A5/FMADcSeGeDisD +VfSOLiga7vxhuDEyt5vbX/D6jLBoGPryJyzLrPwawRApoTffOFdWLvuc0E3nsLs9 +KGC2AL13PwgEmXYlXrEYMCQD9ptPJxp4a9CWZ653/WCFvtwvdYy8nKjkyRiZnFgu +JpWYx1Q+iUGUt0P9J22LYMi2s13eOmEnwirWPaNuscTWwR+MVQ0wV0ajMb9WwbDa +BHxdgjOnOjejAoIBABnShXxkmy1+i4G7mT79tv8murlCFs1Fek6HQ0osbMnFroD8 +AdxPFRveHxjcRUsdmbO1zFrwsULNyUVAXLxe641+Z2wKA94mqj8xVMdXL0nGvkVo +YI2aGxeUF2e5ldU7/k0OYBcbRy7dSMMebBWm4pnDCrcoCxkOb9rVxRmB03Vsrqol +XkN9N1J51Sg7XqqUmkj6WhyVLet2K1Onwcja4+Y+sxLqBy3XvoJ92n4qfgVcaPwd +5wER3Lh8lfXF3rF7cQhqz1x64Pg7KvlDicLHwE0S709Adc+0+YmMmuteZug3PzH3 +gWpQS+dIGLDisgB0+ueL4S25osx6+DYF3VMj2tsCggEAEnTTIqkm33pQXoIyB0DT +TR9yqwKTjFE4XbDNymTwN+x/hP2EDkrRi2hUx7FIXUv1By2FKjAM2ov9spyA7uQb +phWlbJYGn8/ZbxHRXqJCdYeYTs/ZWo9kxW/m9mGvTqBMVfJ4ABf6K/oQGhN7JU0g +21VYUoDMElkb4pE2dyyemrluAptTUfhRA4MuOzJbT26rJei7FTx5i/F54qcsetA4 +pQ5CKf/n7kNeP7A1esa+G473n1iU+7TwfFELtxctwRYo0AGmpExvcymeTbFDRpVh +s/zVtsi2fS6m1hxCzGAk03j+DmhnCpSVBgK2httH26vUjEJHyiEBFOMDFAZD5Q0x +9QKCAQEAmB0BzXXtFgM2o3t9Gw6YTcWm9fXXSQuT1UqVOw1Gbd8q1RHSFzRYL8/f +odWSrvPoZpedToqxXTcNpcWzuKQOxKZMlcUcI5U749hSSBCUnq75MxoVTgy+hkYj +/Ijng9sqkWZxm6m3132BvFmw7KfVSzYUMlPW5tXAnXPouzvLHcTBU8tgC5yKEKpb +4FAnkD9VGHOxgu4RKkuXQ0hklPo1F58qO8J/D0s20mph9x8WFG2icPfJvQj6Ltvc +R/NzlbwUVwstqboRIy9fTn8qMbIpWwPqirCRuI039VX/pUNDJD8AIZ6/KP5D8HR5 +IkAOLnJJ4xiPtupTQCtNpq/469PsWA== -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/web3signer/key.p12 b/testing/web3signer_tests/tls/web3signer/key.p12 index 792dc197f86..c6c2da84610 100644 Binary files a/testing/web3signer_tests/tls/web3signer/key.p12 and b/testing/web3signer_tests/tls/web3signer/key.p12 differ diff --git a/testing/web3signer_tests/tls/web3signer/known_clients.txt b/testing/web3signer_tests/tls/web3signer/known_clients.txt index c4722fe5876..86d61fba75f 100644 --- a/testing/web3signer_tests/tls/web3signer/known_clients.txt +++ b/testing/web3signer_tests/tls/web3signer/known_clients.txt @@ -1 +1 @@ -lighthouse 02:D0:A8:C0:6A:59:90:40:54:67:D4:BD:AE:5A:D4:F5:14:A9:79:38:98:E0:62:93:C1:77:13:FC:B4:60:65:CE +lighthouse 49:99:C9:A4:05:4C:EC:BE:FD:0B:C3:C3:C1:2F:A4:D3:AB:70:96:47:51:F5:5B:3B:37:65:31:56:18:B7:B8:AD diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 044a622d544..504d96ae1c1 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -17,10 +17,11 @@ beacon_node_fallback = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } directory = { workspace = true } -doppelganger_service = { workspace = true } dirs = { workspace = true } -eth2 = { workspace = true } +doppelganger_service = { workspace = true } environment = { workspace = true } +eth2 = { workspace = true } +fdlimit = "0.3.0" graffiti_file = { workspace = true } hyper = { workspace = true } initialized_validators = { workspace = true } @@ -29,15 +30,14 @@ monitoring_api = { workspace = true } parking_lot = { workspace = true } reqwest = { workspace = true } sensitive_url = { workspace = true } -slashing_protection = { workspace = true } serde = { workspace = true } +slashing_protection = { workspace = true } slog = { workspace = true } slot_clock = { workspace = true } +tokio = { workspace = true } types = { workspace = true } validator_http_api = { workspace = 
true } validator_http_metrics = { workspace = true } validator_metrics = { workspace = true } validator_services = { workspace = true } validator_store = { workspace = true } -tokio = { workspace = true } -fdlimit = "0.3.0" diff --git a/validator_client/doppelganger_service/Cargo.toml b/validator_client/doppelganger_service/Cargo.toml index e5f7d3f2ba2..66b61a411b6 100644 --- a/validator_client/doppelganger_service/Cargo.toml +++ b/validator_client/doppelganger_service/Cargo.toml @@ -17,4 +17,4 @@ types = { workspace = true } [dev-dependencies] futures = { workspace = true } -logging = {workspace = true } +logging = { workspace = true } diff --git a/validator_client/graffiti_file/Cargo.toml b/validator_client/graffiti_file/Cargo.toml index 02e48849d10..8868f5aec81 100644 --- a/validator_client/graffiti_file/Cargo.toml +++ b/validator_client/graffiti_file/Cargo.toml @@ -9,11 +9,11 @@ name = "graffiti_file" path = "src/lib.rs" [dependencies] -serde = { workspace = true } bls = { workspace = true } -types = { workspace = true } +serde = { workspace = true } slog = { workspace = true } +types = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } hex = { workspace = true } +tempfile = { workspace = true } diff --git a/validator_client/graffiti_file/src/lib.rs b/validator_client/graffiti_file/src/lib.rs index 0328c14eeb5..9dab2e78272 100644 --- a/validator_client/graffiti_file/src/lib.rs +++ b/validator_client/graffiti_file/src/lib.rs @@ -66,6 +66,9 @@ impl GraffitiFile { for line in lines { let line = line.map_err(|e| Error::InvalidLine(e.to_string()))?; + if line.trim().is_empty() { + continue; + } let (pk_opt, graffiti) = read_line(&line)?; match pk_opt { Some(pk) => { @@ -133,9 +136,15 @@ mod tests { const CUSTOM_GRAFFITI1: &str = "custom-graffiti1"; const CUSTOM_GRAFFITI2: &str = "graffitiwall:720:641:#ffff00"; const EMPTY_GRAFFITI: &str = ""; + // Newline test cases + const CUSTOM_GRAFFITI4: &str = "newlines-tests"; + const PK1: &str = "0x800012708dc03f611751aad7a43a082142832b5c1aceed07ff9b543cf836381861352aa923c70eeb02018b638aa306aa"; const PK2: &str = "0x80001866ce324de7d80ec73be15e2d064dcf121adf1b34a0d679f2b9ecbab40ce021e03bb877e1a2fe72eaaf475e6e21"; const PK3: &str = "0x9035d41a8bc11b08c17d0d93d876087958c9d055afe86fce558e3b988d92434769c8d50b0b463708db80c6aae1160c02"; + const PK4: &str = "0x8c0fca2cc70f44188a4b79e5623ac85898f1df479e14a1f4ebb615907810b6fb939c3fb4ba2081b7a5b6e33dc73621d2"; + const PK5: &str = "0x87998b0ea4a8826f03d1985e5a5ce7235bd3a56fb7559b02a55b737f4ebc69b0bf35444de5cf2680cb7eb2283eb62050"; + const PK6: &str = "0xa2af9b128255568e2ee5c42af118cc4301198123d210dbdbf2ca7ec0222f8d491f308e85076b09a2f44a75875cd6fa0f"; // Create a graffiti file in the required format and return a path to the file. 
fn create_graffiti_file() -> PathBuf { @@ -143,6 +152,9 @@ mod tests { let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); let pk3 = PublicKeyBytes::deserialize(&hex::decode(&PK3[2..]).unwrap()).unwrap(); + let pk4 = PublicKeyBytes::deserialize(&hex::decode(&PK4[2..]).unwrap()).unwrap(); + let pk5 = PublicKeyBytes::deserialize(&hex::decode(&PK5[2..]).unwrap()).unwrap(); + let pk6 = PublicKeyBytes::deserialize(&hex::decode(&PK6[2..]).unwrap()).unwrap(); let file_name = temp.into_path().join("graffiti.txt"); @@ -160,6 +172,29 @@ mod tests { graffiti_file .write_all(format!("{}:{}\n", pk3.as_hex_string(), EMPTY_GRAFFITI).as_bytes()) .unwrap(); + + // Test Lines with leading newlines - these empty lines will be skipped + graffiti_file.write_all(b"\n").unwrap(); + graffiti_file.write_all(b" \n").unwrap(); + graffiti_file + .write_all(format!("{}: {}\n", pk4.as_hex_string(), CUSTOM_GRAFFITI4).as_bytes()) + .unwrap(); + + // Test Empty lines between entries - these will be skipped + graffiti_file.write_all(b"\n").unwrap(); + graffiti_file.write_all(b" \n").unwrap(); + graffiti_file.write_all(b"\t\n").unwrap(); + graffiti_file + .write_all(format!("{}: {}\n", pk5.as_hex_string(), CUSTOM_GRAFFITI4).as_bytes()) + .unwrap(); + + // Test Trailing empty lines - these will be skipped + graffiti_file + .write_all(format!("{}: {}\n", pk6.as_hex_string(), CUSTOM_GRAFFITI4).as_bytes()) + .unwrap(); + graffiti_file.write_all(b"\n").unwrap(); + graffiti_file.write_all(b" \n").unwrap(); + graffiti_file.flush().unwrap(); file_name } @@ -172,6 +207,9 @@ mod tests { let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); let pk3 = PublicKeyBytes::deserialize(&hex::decode(&PK3[2..]).unwrap()).unwrap(); + let pk4 = PublicKeyBytes::deserialize(&hex::decode(&PK4[2..]).unwrap()).unwrap(); + let pk5 = PublicKeyBytes::deserialize(&hex::decode(&PK5[2..]).unwrap()).unwrap(); + let pk6 = PublicKeyBytes::deserialize(&hex::decode(&PK6[2..]).unwrap()).unwrap(); // Read once gf.read_graffiti_file().unwrap(); @@ -190,6 +228,20 @@ mod tests { GraffitiString::from_str(EMPTY_GRAFFITI).unwrap().into() ); + // Test newline cases - all empty lines should be skipped + assert_eq!( + gf.load_graffiti(&pk4).unwrap().unwrap(), + GraffitiString::from_str(CUSTOM_GRAFFITI4).unwrap().into() + ); + assert_eq!( + gf.load_graffiti(&pk5).unwrap().unwrap(), + GraffitiString::from_str(CUSTOM_GRAFFITI4).unwrap().into() + ); + assert_eq!( + gf.load_graffiti(&pk6).unwrap().unwrap(), + GraffitiString::from_str(CUSTOM_GRAFFITI4).unwrap().into() + ); + // Random pk should return the default graffiti let random_pk = Keypair::random().pk.compress(); assert_eq!( diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml index b83acdc782a..76a021ab8c3 100644 --- a/validator_client/http_api/Cargo.toml +++ b/validator_client/http_api/Cargo.toml @@ -10,23 +10,25 @@ path = "src/lib.rs" [dependencies] account_utils = { workspace = true } -bls = { workspace = true } beacon_node_fallback = { workspace = true } +bls = { workspace = true } deposit_contract = { workspace = true } +directory = { workspace = true } +dirs = { workspace = true } doppelganger_service = { workspace = true } -graffiti_file = { workspace = true } eth2 = { workspace = true } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = 
true } +filesystem = { workspace = true } +graffiti_file = { workspace = true } initialized_validators = { workspace = true } lighthouse_version = { workspace = true } logging = { workspace = true } parking_lot = { workspace = true } -filesystem = { workspace = true } rand = { workspace = true } +sensitive_url = { workspace = true } serde = { workspace = true } signing_method = { workspace = true } -sensitive_url = { workspace = true } slashing_protection = { workspace = true } slog = { workspace = true } slot_clock = { workspace = true } @@ -37,14 +39,15 @@ tempfile = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } types = { workspace = true } +url = { workspace = true } validator_dir = { workspace = true } -validator_store = { workspace = true } validator_services = { workspace = true } -url = { workspace = true } -warp_utils = { workspace = true } +validator_store = { workspace = true } warp = { workspace = true } +warp_utils = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] -itertools = { workspace = true } futures = { workspace = true } +itertools = { workspace = true } rand = { workspace = true, features = ["small_rng"] } diff --git a/validator_client/http_api/src/api_secret.rs b/validator_client/http_api/src/api_secret.rs index afcac477ecb..bac54dc8b24 100644 --- a/validator_client/http_api/src/api_secret.rs +++ b/validator_client/http_api/src/api_secret.rs @@ -5,7 +5,7 @@ use std::fs; use std::path::{Path, PathBuf}; use warp::Filter; -/// The name of the file which stores the API token. +/// The default name of the file which stores the API token. pub const PK_FILENAME: &str = "api-token.txt"; pub const PK_LEN: usize = 33; @@ -31,14 +31,32 @@ pub struct ApiSecret { impl ApiSecret { /// If the public key is already on-disk, use it. /// - /// The provided `dir` is a directory containing `PK_FILENAME`. + /// The provided `pk_path` is the path to the file containing the API token. /// /// If the public key file is missing on disk, create a new key and /// write it to disk (over-writing any existing files). - pub fn create_or_open>(dir: P) -> Result { - let pk_path = dir.as_ref().join(PK_FILENAME); + pub fn create_or_open>(pk_path: P) -> Result { + let pk_path = pk_path.as_ref(); + + // Check if the path is a directory + if pk_path.is_dir() { + return Err(format!( + "API token path {:?} is a directory, not a file", + pk_path + )); + } if !pk_path.exists() { + // Create parent directories if they don't exist + if let Some(parent) = pk_path.parent() { + std::fs::create_dir_all(parent).map_err(|e| { + format!( + "Unable to create parent directories for {:?}: {:?}", + pk_path, e + ) + })?; + } + let length = PK_LEN; let pk: String = thread_rng() .sample_iter(&Alphanumeric) @@ -47,7 +65,7 @@ impl ApiSecret { .collect(); // Create and write the public key to file with appropriate permissions - create_with_600_perms(&pk_path, pk.to_string().as_bytes()).map_err(|e| { + create_with_600_perms(pk_path, pk.to_string().as_bytes()).map_err(|e| { format!( "Unable to create file with permissions for {:?}: {:?}", pk_path, e @@ -55,13 +73,16 @@ impl ApiSecret { })?; } - let pk = fs::read(&pk_path) - .map_err(|e| format!("cannot read {}: {}", PK_FILENAME, e))? .iter() .map(|&c| char::from(c)) .collect(); - Ok(Self { pk, pk_path }) + let pk = fs::read(pk_path) + .map_err(|e| format!("cannot read {}: {}", pk_path.display(), e))? + .iter() + .map(|&c| char::from(c)) + .collect(); + Ok(Self { + pk, + pk_path: pk_path.to_path_buf(), + }) } /// Returns the API token.
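
Illustrative sketch (not part of the diff): the reworked `create_or_open` above now treats its argument as the token file path itself and creates any missing parent directories on the way. A minimal standalone equivalent of that path handling, with the random-token generation and the 0600-permission helper from the hunk above replaced by plain `std::fs` calls for brevity:

use std::fs;
use std::path::Path;

fn create_or_open_token(pk_path: &Path) -> Result<String, String> {
    // Reject directories up front: the argument must name the token file itself.
    if pk_path.is_dir() {
        return Err(format!("API token path {:?} is a directory, not a file", pk_path));
    }
    if !pk_path.exists() {
        // New behaviour: missing parent directories are created on demand.
        if let Some(parent) = pk_path.parent() {
            fs::create_dir_all(parent).map_err(|e| {
                format!("Unable to create parent directories for {:?}: {:?}", pk_path, e)
            })?;
        }
        // The real implementation writes a freshly sampled alphanumeric token
        // with 0600 permissions; a fixed placeholder is used here.
        fs::write(pk_path, "example-token")
            .map_err(|e| format!("cannot write {}: {}", pk_path.display(), e))?;
    }
    fs::read_to_string(pk_path).map_err(|e| format!("cannot read {}: {}", pk_path.display(), e))
}
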
diff --git a/validator_client/http_api/src/create_validator.rs b/validator_client/http_api/src/create_validator.rs index dfd092e8b46..f90a1057a43 100644 --- a/validator_client/http_api/src/create_validator.rs +++ b/validator_client/http_api/src/create_validator.rs @@ -2,7 +2,7 @@ use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition} use account_utils::{ eth2_keystore::Keystore, eth2_wallet::{bip39::Mnemonic, WalletBuilder}, - random_mnemonic, random_password, ZeroizeString, + random_mnemonic, random_password, }; use eth2::lighthouse_vc::types::{self as api_types}; use slot_clock::SlotClock; @@ -11,6 +11,7 @@ use types::ChainSpec; use types::EthSpec; use validator_dir::{keystore_password_path, Builder as ValidatorDirBuilder}; use validator_store::ValidatorStore; +use zeroize::Zeroizing; /// Create some validator EIP-2335 keystores and store them on disk. Then, enroll the validators in /// this validator client. @@ -59,7 +60,7 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, for request in validator_requests { let voting_password = random_password(); let withdrawal_password = random_password(); - let voting_password_string = ZeroizeString::from( + let voting_password_string = Zeroizing::from( String::from_utf8(voting_password.as_bytes().to_vec()).map_err(|e| { warp_utils::reject::custom_server_error(format!( "locally generated password is not utf8: {:?}", @@ -199,7 +200,7 @@ pub async fn create_validators_web3signer( pub fn get_voting_password_storage( secrets_dir: &Option, voting_keystore: &Keystore, - voting_password_string: &ZeroizeString, + voting_password_string: &Zeroizing, ) -> Result { if let Some(secrets_dir) = &secrets_dir { let password_path = keystore_password_path(secrets_dir, voting_keystore); diff --git a/validator_client/http_api/src/keystores.rs b/validator_client/http_api/src/keystores.rs index 5822c89cb8a..fd6b4fdae51 100644 --- a/validator_client/http_api/src/keystores.rs +++ b/validator_client/http_api/src/keystores.rs @@ -1,5 +1,5 @@ //! Implementation of the standard keystore management API. 
-use account_utils::{validator_definitions::PasswordStorage, ZeroizeString}; +use account_utils::validator_definitions::PasswordStorage; use eth2::lighthouse_vc::{ std_types::{ DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, @@ -22,6 +22,7 @@ use validator_dir::{keystore_password_path, Builder as ValidatorDirBuilder}; use validator_store::ValidatorStore; use warp::Rejection; use warp_utils::reject::{custom_bad_request, custom_server_error}; +use zeroize::Zeroizing; pub fn list( validator_store: Arc>, @@ -167,7 +168,7 @@ pub fn import( fn import_single_keystore( keystore: Keystore, - password: ZeroizeString, + password: Zeroizing, validator_dir_path: PathBuf, secrets_dir: Option, validator_store: &ValidatorStore, diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index b58c7ccec02..73ebe717af3 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -7,6 +7,7 @@ mod remotekeys; mod tests; pub mod test_utils; +pub use api_secret::PK_FILENAME; use graffiti::{delete_graffiti, get_graffiti, set_graffiti}; @@ -23,6 +24,7 @@ use beacon_node_fallback::CandidateInfo; use create_validator::{ create_validators_mnemonic, create_validators_web3signer, get_voting_password_storage, }; +use directory::{DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_VALIDATOR_DIR}; use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, types::{ @@ -99,10 +101,18 @@ pub struct Config { pub allow_origin: Option, pub allow_keystore_export: bool, pub store_passwords_in_secrets_dir: bool, + pub http_token_path: PathBuf, } impl Default for Config { fn default() -> Self { + // This value is always overridden when building config from CLI. + let http_token_path = dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(DEFAULT_ROOT_DIR) + .join(DEFAULT_HARDCODED_NETWORK) + .join(DEFAULT_VALIDATOR_DIR) + .join(PK_FILENAME); Self { enabled: false, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), @@ -110,6 +120,7 @@ impl Default for Config { allow_origin: None, allow_keystore_export: false, store_passwords_in_secrets_dir: false, + http_token_path, } } } diff --git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index 931c4ea08ed..390095eec73 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -1,8 +1,8 @@ +use crate::api_secret::PK_FILENAME; use crate::{ApiSecret, Config as HttpConfig, Context}; use account_utils::validator_definitions::ValidatorDefinitions; use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, - ZeroizeString, }; use deposit_contract::decode_eth1_tx_data; use doppelganger_service::DoppelgangerService; @@ -28,6 +28,7 @@ use task_executor::test_utils::TestRuntime; use tempfile::{tempdir, TempDir}; use tokio::sync::oneshot; use validator_store::{Config as ValidatorStoreConfig, ValidatorStore}; +use zeroize::Zeroizing; pub const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; pub const TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); @@ -73,6 +74,7 @@ impl ApiTester { let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); + let token_path = tempdir().unwrap().path().join(PK_FILENAME); let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap(); @@ -85,7 +87,7 @@ impl ApiTester { .await .unwrap(); - let api_secret = 
ApiSecret::create_or_open(validator_dir.path()).unwrap(); + let api_secret = ApiSecret::create_or_open(token_path).unwrap(); let api_pubkey = api_secret.api_token(); let config = ValidatorStoreConfig { @@ -177,6 +179,7 @@ impl ApiTester { allow_origin: None, allow_keystore_export: true, store_passwords_in_secrets_dir: false, + http_token_path: tempdir().unwrap().path().join(PK_FILENAME), } } @@ -199,8 +202,8 @@ impl ApiTester { } pub fn invalid_token_client(&self) -> ValidatorClientHttpClient { - let tmp = tempdir().unwrap(); - let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap(); + let tmp = tempdir().unwrap().path().join("invalid-token.txt"); + let api_secret = ApiSecret::create_or_open(tmp).unwrap(); let invalid_pubkey = api_secret.api_token(); ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey).unwrap() } @@ -321,7 +324,7 @@ impl ApiTester { .collect::>(); let (response, mnemonic) = if s.specify_mnemonic { - let mnemonic = ZeroizeString::from(random_mnemonic().phrase().to_string()); + let mnemonic = Zeroizing::from(random_mnemonic().phrase().to_string()); let request = CreateValidatorsMnemonicRequest { mnemonic: mnemonic.clone(), key_derivation_path_offset: s.key_derivation_path_offset, diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index 76a6952153b..7ea3d7ebaab 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -9,7 +9,7 @@ use initialized_validators::{Config as InitializedValidatorsConfig, InitializedV use crate::{ApiSecret, Config as HttpConfig, Context}; use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, - random_password_string, validator_definitions::ValidatorDefinitions, ZeroizeString, + random_password_string, validator_definitions::ValidatorDefinitions, }; use deposit_contract::decode_eth1_tx_data; use eth2::{ @@ -33,6 +33,7 @@ use task_executor::test_utils::TestRuntime; use tempfile::{tempdir, TempDir}; use types::graffiti::GraffitiString; use validator_store::{Config as ValidatorStoreConfig, ValidatorStore}; +use zeroize::Zeroizing; const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; pub const TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); @@ -52,8 +53,10 @@ struct ApiTester { impl ApiTester { pub async fn new() -> Self { - let mut config = ValidatorStoreConfig::default(); - config.fee_recipient = Some(TEST_DEFAULT_FEE_RECIPIENT); + let config = ValidatorStoreConfig { + fee_recipient: Some(TEST_DEFAULT_FEE_RECIPIENT), + ..Default::default() + }; Self::new_with_config(config).await } @@ -62,6 +65,7 @@ impl ApiTester { let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); + let token_path = tempdir().unwrap().path().join("api-token.txt"); let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap(); @@ -74,7 +78,7 @@ impl ApiTester { .await .unwrap(); - let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap(); + let api_secret = ApiSecret::create_or_open(&token_path).unwrap(); let api_pubkey = api_secret.api_token(); let spec = Arc::new(E::default_spec()); @@ -126,6 +130,7 @@ impl ApiTester { allow_origin: None, allow_keystore_export: true, store_passwords_in_secrets_dir: false, + http_token_path: token_path, }, sse_logging_components: None, log, @@ -136,7 +141,7 @@ impl ApiTester { let (listening_socket, server) = super::serve(ctx, test_runtime.task_executor.exit()).unwrap(); - tokio::spawn(async { server.await }); + 
tokio::spawn(server); let url = SensitiveUrl::parse(&format!( "http://{}:{}", @@ -160,8 +165,8 @@ impl ApiTester { } pub fn invalid_token_client(&self) -> ValidatorClientHttpClient { - let tmp = tempdir().unwrap(); - let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap(); + let tmp = tempdir().unwrap().path().join("invalid-token.txt"); + let api_secret = ApiSecret::create_or_open(tmp).unwrap(); let invalid_pubkey = api_secret.api_token(); ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey.clone()).unwrap() } @@ -282,7 +287,7 @@ impl ApiTester { .collect::>(); let (response, mnemonic) = if s.specify_mnemonic { - let mnemonic = ZeroizeString::from(random_mnemonic().phrase().to_string()); + let mnemonic = Zeroizing::from(random_mnemonic().phrase().to_string()); let request = CreateValidatorsMnemonicRequest { mnemonic: mnemonic.clone(), key_derivation_path_offset: s.key_derivation_path_offset, @@ -342,22 +347,21 @@ impl ApiTester { .set_nextaccount(s.key_derivation_path_offset) .unwrap(); - for i in 0..s.count { + for validator in response.iter().take(s.count) { let keypairs = wallet .next_validator(PASSWORD_BYTES, PASSWORD_BYTES, PASSWORD_BYTES) .unwrap(); let voting_keypair = keypairs.voting.decrypt_keypair(PASSWORD_BYTES).unwrap(); assert_eq!( - response[i].voting_pubkey, + validator.voting_pubkey, voting_keypair.pk.clone().into(), "the locally generated voting pk should match the server response" ); let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap(); - let deposit_bytes = - serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap(); + let deposit_bytes = serde_utils::hex::decode(&validator.eth1_deposit_tx_data).unwrap(); let (deposit_data, _) = decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance) diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index f3f6de548bf..6559a2bb9e5 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -14,8 +14,9 @@ use std::{collections::HashMap, path::Path}; use tokio::runtime::Handle; use types::{attestation::AttestationBase, Address}; use validator_store::DEFAULT_GAS_LIMIT; +use zeroize::Zeroizing; -fn new_keystore(password: ZeroizeString) -> Keystore { +fn new_keystore(password: Zeroizing) -> Keystore { let keypair = Keypair::random(); Keystore( KeystoreBuilder::new(&keypair, password.as_ref(), String::new()) @@ -129,7 +130,7 @@ fn check_keystore_get_response<'a>( for (ks1, ks2) in response.data.iter().zip_eq(expected_keystores) { assert_eq!(ks1.validating_pubkey, keystore_pubkey(ks2)); assert_eq!(ks1.derivation_path, ks2.path()); - assert!(ks1.readonly == None || ks1.readonly == Some(false)); + assert!(ks1.readonly.is_none() || ks1.readonly == Some(false)); } } @@ -146,7 +147,7 @@ fn check_keystore_import_response( } } -fn check_keystore_delete_response<'a>( +fn check_keystore_delete_response( response: &DeleteKeystoresResponse, expected_statuses: impl IntoIterator, ) { @@ -633,7 +634,7 @@ async fn check_get_set_fee_recipient() { assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: TEST_DEFAULT_FEE_RECIPIENT, } ); @@ -653,7 +654,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[1], &UpdateFeeRecipientRequest { - ethaddress: fee_recipient_public_key_1.clone(), + ethaddress: fee_recipient_public_key_1, }, ) .await @@ -666,14 +667,14 @@ async fn 
check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_public_key_1.clone() + fee_recipient_public_key_1 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -685,7 +686,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[2], &UpdateFeeRecipientRequest { - ethaddress: fee_recipient_public_key_2.clone(), + ethaddress: fee_recipient_public_key_2, }, ) .await @@ -698,16 +699,16 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_public_key_1.clone() + fee_recipient_public_key_1 } else if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -719,7 +720,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[1], &UpdateFeeRecipientRequest { - ethaddress: fee_recipient_override.clone(), + ethaddress: fee_recipient_override, }, ) .await @@ -731,16 +732,16 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_override.clone() + fee_recipient_override } else if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -760,14 +761,14 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -813,7 +814,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: DEFAULT_GAS_LIMIT, } ); @@ -842,14 +843,14 @@ async fn check_get_set_gas_limit() { .await .expect("should get gas limit"); let expected = if i == 1 { - gas_limit_public_key_1.clone() + gas_limit_public_key_1 } else { DEFAULT_GAS_LIMIT }; assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -883,7 +884,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -916,7 +917,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -943,7 +944,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -1304,7 +1305,7 @@ async fn delete_concurrent_with_signing() { let handle = handle.spawn(async move { for j in 0..num_attestations { let mut att = make_attestation(j, j + 1); - for (_validator_id, public_key) in thread_pubkeys.iter().enumerate() { + for public_key in thread_pubkeys.iter() { let _ = validator_store .sign_attestation(*public_key, 0, &mut att, Epoch::new(j + 1)) .await; @@ -2083,7 +2084,7 @@ async fn import_remotekey_web3signer_disabled() { web3signer_req.enable = false; // Import 
web3signers. - let _ = tester + tester .client .post_lighthouse_validators_web3signer(&vec![web3signer_req]) .await @@ -2147,8 +2148,11 @@ async fn import_remotekey_web3signer_enabled() { // 1 validator imported. assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); - let vals = tester.initialized_validators.read(); - let web3_vals = vals.validator_definitions(); + let web3_vals = tester + .initialized_validators + .read() + .validator_definitions() + .to_vec(); // Import remotekeys. let import_res = tester @@ -2165,11 +2169,13 @@ assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); - let vals = tester.initialized_validators.read(); - let remote_vals = vals.validator_definitions(); + { + let vals = tester.initialized_validators.read(); + let remote_vals = vals.validator_definitions(); - // Web3signer should not be overwritten since it is enabled. - assert!(web3_vals == remote_vals); + // Web3signer should not be overwritten since it is enabled. + assert!(web3_vals == remote_vals); + } // Remotekey should not be imported. let expected_responses = vec![SingleListRemotekeysResponse { diff --git a/validator_client/http_metrics/Cargo.toml index a9de26a55bb..c29a4d18fa0 100644 --- a/validator_client/http_metrics/Cargo.toml +++ b/validator_client/http_metrics/Cargo.toml @@ -5,16 +5,16 @@ edition = { workspace = true } authors = ["Sigma Prime <info@sigmaprime.io>"] [dependencies] +lighthouse_version = { workspace = true } malloc_utils = { workspace = true } -slot_clock = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } slog = { workspace = true } -warp_utils = { workspace = true } -warp = { workspace = true } -lighthouse_version = { workspace = true } +slot_clock = { workspace = true } +types = { workspace = true } +validator_metrics = { workspace = true } validator_services = { workspace = true } validator_store = { workspace = true } -validator_metrics = { workspace = true } -types = { workspace = true } +warp = { workspace = true } +warp_utils = { workspace = true } diff --git a/validator_client/initialized_validators/Cargo.toml index 426cb303f6e..05e85261f9a 100644 --- a/validator_client/initialized_validators/Cargo.toml +++ b/validator_client/initialized_validators/Cargo.toml @@ -5,22 +5,23 @@ edition = { workspace = true } authors = ["Sigma Prime <info@sigmaprime.io>"] [dependencies] -signing_method = { workspace = true } account_utils = { workspace = true } +bincode = { workspace = true } +bls = { workspace = true } eth2_keystore = { workspace = true } -metrics = { workspace = true } +filesystem = { workspace = true } lockfile = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } +rand = { workspace = true } reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +signing_method = { workspace = true } slog = { workspace = true } +tokio = { workspace = true } types = { workspace = true } url = { workspace = true } validator_dir = { workspace = true } -rand = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -bls = { workspace = true } -tokio = { workspace = true } -bincode = { workspace = true } -filesystem = { workspace = true } validator_metrics = { workspace = true } +zeroize = { workspace = true } diff --git a/validator_client/initialized_validators/src/lib.rs
b/validator_client/initialized_validators/src/lib.rs index 0b36dbd62cf..bd64091dae4 100644 --- a/validator_client/initialized_validators/src/lib.rs +++ b/validator_client/initialized_validators/src/lib.rs @@ -14,7 +14,6 @@ use account_utils::{ self, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, CONFIG_FILENAME, }, - ZeroizeString, }; use eth2_keystore::Keystore; use lockfile::{Lockfile, LockfileError}; @@ -34,6 +33,7 @@ use types::graffiti::GraffitiString; use types::{Address, Graffiti, Keypair, PublicKey, PublicKeyBytes}; use url::{ParseError, Url}; use validator_dir::Builder as ValidatorDirBuilder; +use zeroize::Zeroizing; use key_cache::KeyCache; @@ -74,7 +74,7 @@ pub enum OnDecryptFailure { pub struct KeystoreAndPassword { pub keystore: Keystore, - pub password: Option<ZeroizeString>, + pub password: Option<Zeroizing<String>>, } #[derive(Debug)] @@ -262,7 +262,7 @@ impl InitializedValidator { // If the password is supplied, use it and ignore the path // (if supplied). (_, Some(password)) => ( - password.as_ref().to_vec().into(), + password.as_bytes().to_vec().into(), keystore .decrypt_keypair(password.as_ref()) .map_err(Error::UnableToDecryptKeystore)?, @@ -282,7 +282,7 @@ impl InitializedValidator { &keystore, &keystore_path, )?; - (password.as_ref().to_vec().into(), keypair) + (password.as_bytes().to_vec().into(), keypair) } }, ) @@ -455,7 +455,7 @@ fn build_web3_signer_client( fn unlock_keystore_via_stdin_password( keystore: &Keystore, keystore_path: &Path, -) -> Result<(ZeroizeString, Keypair), Error> { +) -> Result<(Zeroizing<String>, Keypair), Error> { eprintln!(); eprintln!( "The {} file does not contain either of the following fields for {:?}:", @@ -1172,14 +1172,14 @@ impl InitializedValidators { voting_keystore_path, } => { let pw = if let Some(p) = voting_keystore_password { - p.as_ref().to_vec().into() + p.as_bytes().to_vec().into() } else if let Some(path) = voting_keystore_password_path { read_password(path).map_err(Error::UnableToReadVotingKeystorePassword)? } else { let keystore = open_keystore(voting_keystore_path)?; unlock_keystore_via_stdin_password(&keystore, voting_keystore_path)? .0 - .as_ref() + .as_bytes() .to_vec() .into() }; @@ -1425,7 +1425,7 @@ impl InitializedValidators { /// This should only be used for testing, it's rather destructive.
pub fn delete_passwords_from_validator_definitions( &mut self, - ) -> Result<HashMap<PublicKey, ZeroizeString>, Error> { + ) -> Result<HashMap<PublicKey, Zeroizing<String>>, Error> { let mut passwords = HashMap::default(); for def in self.definitions.as_mut_slice() { diff --git a/validator_client/signing_method/Cargo.toml index 0f3852eff67..3e1a48142f9 100644 --- a/validator_client/signing_method/Cargo.toml +++ b/validator_client/signing_method/Cargo.toml @@ -6,12 +6,12 @@ authors = ["Sigma Prime <info@sigmaprime.io>"] [dependencies] eth2_keystore = { workspace = true } +ethereum_serde_utils = { workspace = true } lockfile = { workspace = true } parking_lot = { workspace = true } reqwest = { workspace = true } +serde = { workspace = true } task_executor = { workspace = true } types = { workspace = true } url = { workspace = true } validator_metrics = { workspace = true } -serde = { workspace = true } -ethereum_serde_utils = { workspace = true } diff --git a/validator_client/signing_method/src/lib.rs b/validator_client/signing_method/src/lib.rs index 2fe4af39d3a..f3b62c9500b 100644 --- a/validator_client/signing_method/src/lib.rs +++ b/validator_client/signing_method/src/lib.rs @@ -49,7 +49,7 @@ pub enum SignableMessage<'a, E: EthSpec, Payload: AbstractExecPayload<E> = FullP VoluntaryExit(&'a VoluntaryExit), } -impl<'a, E: EthSpec, Payload: AbstractExecPayload<E>> SignableMessage<'a, E, Payload> { +impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignableMessage<'_, E, Payload> { /// Returns the `SignedRoot` for the contained message. /// /// The actual `SignedRoot` trait is not used since it also requires a `TreeHash` impl, which is diff --git a/validator_client/slashing_protection/Cargo.toml index 6982958bd56..1a098742d89 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -10,16 +10,16 @@ name = "slashing_protection_tests" path = "tests/main.rs" [dependencies] -tempfile = { workspace = true } -types = { workspace = true } -rusqlite = { workspace = true } +arbitrary = { workspace = true, features = ["derive"] } +ethereum_serde_utils = { workspace = true } +filesystem = { workspace = true } r2d2 = { workspace = true } r2d2_sqlite = "0.21.0" +rusqlite = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -ethereum_serde_utils = { workspace = true } -filesystem = { workspace = true } -arbitrary = { workspace = true, features = ["derive"] } +tempfile = { workspace = true } +types = { workspace = true } [dev-dependencies] rayon = { workspace = true } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 209876f07b0..b2d1ebb3c25 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -247,6 +247,18 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) + .arg( + Arg::new("http-token-path") + .long("http-token-path") + .requires("http") + .value_name("HTTP_TOKEN_PATH") + .help( + "Path to file containing the HTTP API token for validator client authentication. \ If not specified, defaults to {validators-dir}/api-token.txt."
+ ) + .action(ArgAction::Set) + .display_order(0) + ) /* Prometheus metrics HTTP server related arguments */ .arg( Arg::new("metrics") diff --git a/validator_client/src/config.rs index abdadeb393b..bb72ef81c80 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -17,7 +17,7 @@ use std::path::PathBuf; use std::str::FromStr; use std::time::Duration; use types::{Address, GRAFFITI_BYTES_LEN}; -use validator_http_api; +use validator_http_api::{self, PK_FILENAME}; use validator_http_metrics; use validator_store::Config as ValidatorStoreConfig; @@ -314,6 +314,13 @@ impl Config { config.http_api.store_passwords_in_secrets_dir = true; } + if let Some(http_token_path) = cli_args.get_one::<String>("http-token-path") { + config.http_api.http_token_path = PathBuf::from(http_token_path); + } else { + // For backward compatibility, default to the path under the validator dir if not provided. + config.http_api.http_token_path = config.validator_dir.join(PK_FILENAME); + } + /* * Prometheus metrics HTTP server */ diff --git a/validator_client/src/lib.rs index 2cc22357fbc..8ebfe98b15e 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -551,7 +551,7 @@ impl ProductionValidatorClient { let (block_service_tx, block_service_rx) = mpsc::channel(channel_capacity); let log = self.context.log(); - let api_secret = ApiSecret::create_or_open(&self.config.validator_dir)?; + let api_secret = ApiSecret::create_or_open(&self.config.http_api.http_token_path)?; self.http_api_listen_addr = if self.config.http_api.enabled { let ctx = Arc::new(validator_http_api::Context { diff --git a/validator_client/validator_services/Cargo.toml index 7dcd815541e..21f0ae2d776 100644 --- a/validator_client/validator_services/Cargo.toml +++ b/validator_client/validator_services/Cargo.toml @@ -6,18 +6,18 @@ authors = ["Sigma Prime <info@sigmaprime.io>"] [dependencies] beacon_node_fallback = { workspace = true }
ethereum_serde_utils = { workspace = true } -tree_hash = { workspace = true } -eth2 = { workspace = true } hex = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } tokio = { workspace = true } -derivative = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } regex = { workspace = true } +tempfile = { workspace = true } validator_http_api = { workspace = true } diff --git a/validator_manager/src/common.rs index 4a35791b322..cc4157990fd 100644 --- a/validator_manager/src/common.rs +++ b/validator_manager/src/common.rs @@ -1,5 +1,5 @@ +use account_utils::strip_off_newlines; pub use account_utils::STDIN_INPUTS_FLAG; -use account_utils::{strip_off_newlines, ZeroizeString}; use eth2::lighthouse_vc::std_types::{InterchangeJsonStr, KeystoreJsonStr}; use eth2::{ lighthouse_vc::{ @@ -14,6 +14,7 @@ use std::fs; use std::path::{Path, PathBuf}; use tree_hash::TreeHash; use types::*; +use zeroize::Zeroizing; pub const IGNORE_DUPLICATES_FLAG: &str = "ignore-duplicates"; pub const COUNT_FLAG: &str = "count"; @@ -41,7 +42,7 @@ pub enum UploadError { #[derive(Clone, Serialize, Deserialize)] pub struct ValidatorSpecification { pub voting_keystore: KeystoreJsonStr, - pub voting_keystore_password: ZeroizeString, + pub voting_keystore_password: Zeroizing<String>, pub slashing_protection: Option<InterchangeJsonStr>, pub fee_recipient: Option<Address>
, pub gas_limit: Option<u64>, diff --git a/validator_manager/src/import_validators.rs index 2a819a2a645..3cebc10bb38 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -1,6 +1,6 @@ use super::common::*; use crate::DumpConfig; -use account_utils::{eth2_keystore::Keystore, ZeroizeString}; +use account_utils::eth2_keystore::Keystore; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; use derivative::Derivative; @@ -10,6 +10,7 @@ use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; use types::Address; +use zeroize::Zeroizing; pub const CMD: &str = "import"; pub const VALIDATORS_FILE_FLAG: &str = "validators-file"; @@ -167,7 +168,7 @@ pub struct ImportConfig { pub vc_token_path: PathBuf, pub ignore_duplicates: bool, #[derivative(Debug = "ignore")] - pub password: Option<ZeroizeString>, + pub password: Option<Zeroizing<String>>, pub fee_recipient: Option<Address>
, pub gas_limit: Option<u64>, pub builder_proposals: Option<bool>, @@ -184,7 +185,7 @@ impl ImportConfig { vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, ignore_duplicates: matches.get_flag(IGNORE_DUPLICATES_FLAG), - password: clap_utils::parse_optional(matches, PASSWORD)?, + password: clap_utils::parse_optional(matches, PASSWORD)?.map(Zeroizing::new), fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT)?, gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT)?, builder_proposals: clap_utils::parse_optional(matches, BUILDER_PROPOSALS)?, @@ -382,10 +383,7 @@ async fn run<'a>(config: ImportConfig) -> Result<(), String> { pub mod tests { use super::*; use crate::create_validators::tests::TestBuilder as CreateTestBuilder; - use std::{ - fs::{self, File}, - str::FromStr, - }; + use std::fs::{self, File}; use tempfile::{tempdir, TempDir}; use validator_http_api::{test_utils::ApiTester, Config as HttpConfig}; @@ -419,7 +417,7 @@ pub mod tests { vc_url: vc.url.clone(), vc_token_path, ignore_duplicates: false, - password: Some(ZeroizeString::from_str("password").unwrap()), + password: Some(Zeroizing::new("password".into())), fee_recipient: None, builder_boost_factor: None, gas_limit: None, @@ -522,7 +520,7 @@ pub mod tests { let local_validators: Vec<ValidatorSpecification> = { let contents = - fs::read_to_string(&self.import_config.validators_file_path.unwrap()) + fs::read_to_string(self.import_config.validators_file_path.unwrap()) .unwrap(); serde_json::from_str(&contents).unwrap() }; @@ -559,7 +557,7 @@ pub mod tests { self.vc.ensure_key_cache_consistency().await; let local_keystore: Keystore = - Keystore::from_json_file(&self.import_config.keystore_file_path.unwrap()) + Keystore::from_json_file(self.import_config.keystore_file_path.unwrap()) .unwrap(); let list_keystores_response = self.vc.client.get_keystores().await.unwrap().data; diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index 807a147ca1a..4d0820f5a8b 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -1,6 +1,6 @@ use super::common::*; use crate::DumpConfig; -use account_utils::{read_password_from_user, ZeroizeString}; +use account_utils::read_password_from_user; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::{ lighthouse_vc::{ @@ -19,6 +19,7 @@ use std::str::FromStr; use std::time::Duration; use tokio::time::sleep; use types::{Address, PublicKeyBytes}; +use zeroize::Zeroizing; pub const MOVE_DIR_NAME: &str = "lighthouse-validator-move"; pub const VALIDATOR_SPECIFICATION_FILE: &str = "validator-specification.json"; @@ -48,7 +49,7 @@ pub enum PasswordSource { } impl PasswordSource { - fn read_password(&mut self, pubkey: &PublicKeyBytes) -> Result<ZeroizeString, String> { + fn read_password(&mut self, pubkey: &PublicKeyBytes) -> Result<Zeroizing<String>, String> { match self { PasswordSource::Interactive { stdin_inputs } => { eprintln!("Please enter a password for keystore {:?}:", pubkey); @@ -977,13 +978,13 @@ mod test { }) .unwrap(); // Set all definitions to use the same password path as the primary. - definitions.iter_mut().enumerate().for_each(|(_, def)| { - match &mut def.signing_definition { - SigningDefinition::LocalKeystore { - voting_keystore_password_path: Some(path), - .. - } => *path = primary_path.clone(), - _ => (), + definitions.iter_mut().for_each(|def| { + if let SigningDefinition::LocalKeystore { + voting_keystore_password_path: Some(path), ..
+ } = &mut def.signing_definition + { + *path = primary_path.clone() } }) } diff --git a/watch/Cargo.toml b/watch/Cargo.toml index 9e8da3b293b..41cfb58e287 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -10,37 +10,36 @@ path = "src/lib.rs" [[bin]] name = "watch" path = "src/main.rs" - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +axum = "0.7" +beacon_node = { workspace = true } +bls = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } -log = { workspace = true } +diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } +diesel_migrations = { version = "2.0.0", features = ["postgres"] } env_logger = { workspace = true } -types = { workspace = true } eth2 = { workspace = true } -beacon_node = { workspace = true } -tokio = { workspace = true } -axum = "0.7" hyper = { workspace = true } +log = { workspace = true } +r2d2 = { workspace = true } +rand = { workspace = true } +reqwest = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -reqwest = { workspace = true } -url = { workspace = true } -rand = { workspace = true } -diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } -diesel_migrations = { version = "2.0.0", features = ["postgres"] } -bls = { workspace = true } -r2d2 = { workspace = true } serde_yaml = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +url = { workspace = true } [dev-dependencies] -tokio-postgres = "0.7.5" -http_api = { workspace = true } beacon_chain = { workspace = true } +http_api = { workspace = true } +logging = { workspace = true } network = { workspace = true } +task_executor = { workspace = true } testcontainers = "0.15" +tokio-postgres = "0.7.5" unused_port = { workspace = true } -task_executor = { workspace = true } -logging = { workspace = true } diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs index b31583c6299..7193b0744aa 100644 --- a/watch/src/database/mod.rs +++ b/watch/src/database/mod.rs @@ -109,9 +109,9 @@ pub fn get_active_config(conn: &mut PgConn) -> Result<Option<ActiveConfig>, Err .optional()?) } -/// -/// INSERT statements -/// +/* + * INSERT statements + */ /// Inserts a single row into the `canonical_slots` table. /// If `new_slot.beacon_block` is `None`, the value in the row will be `null`. @@ -245,9 +245,9 @@ pub fn insert_batch_validators( Ok(()) } -/// -/// SELECT statements -/// +/* + * SELECT statements + */ /// Selects a single row of the `canonical_slots` table corresponding to a given `slot_query`. pub fn get_canonical_slot( @@ -746,9 +746,9 @@ pub fn count_validators_activated_before_slot( .map_err(Error::Database) } -/// -/// DELETE statements. -/// +/* + * DELETE statements. + */ /// Deletes all rows of the `canonical_slots` table which have `slot` greater than `slot_query`.
/// diff --git a/wordlist.txt b/wordlist.txt new file mode 100644 index 00000000000..6287366cbcb --- /dev/null +++ b/wordlist.txt @@ -0,0 +1,234 @@ +APIs +ARMv +AUR +Backends +Backfilling +Beaconcha +Besu +Broadwell +BIP +BLS +BN +BNs +BTC +BTEC +Casper +CentOS +Chiado +CMake +CoinCashew +Consensys +CORS +CPUs +DBs +DES +DHT +DNS +Dockerhub +DoS +EIP +ENR +Erigon +Esat's +ETH +EthDocker +Ethereum +Ethstaker +Exercism +Extractable +FFG +Geth +Gitcoin +Gnosis +Goerli +Grafana +Holesky +Homebrew +Infura +IPs +IPv +JSON +KeyManager +Kurtosis +LMDB +LLVM +LRU +LTO +Mainnet +MDBX +Merkle +MEV +MSRV +NAT's +Nethermind +NodeJS +NullLogger +PathBuf +PowerShell +PPA +Pre +Proto +PRs +Prysm +QUIC +RasPi +README +RESTful +Reth +RHEL +Ropsten +RPC +Ryzen +Sepolia +Somer +SSD +SSL +SSZ +Styleguide +TCP +Teku +TLS +TODOs +UDP +UI +UPnP +USD +UX +Validator +VC +VCs +VPN +Withdrawable +WSL +YAML +aarch +anonymize +api +attester +backend +backends +backfill +backfilling +beaconcha +bitfield +blockchain +bn +cli +clippy +config +cpu +cryptocurrencies +cryptographic +danksharding +datadir +datadirs +de +decrypt +decrypted +dest +dir +disincentivise +doppelgänger +dropdown +else's +env +eth +ethdo +ethereum +ethstaker +filesystem +frontend +gapped +github +graffitis +gwei +hdiffs +homebrew +hostname +html +http +https +hDiff +implementers +interoperable +io +iowait +jemalloc +json +jwt +kb +keymanager +keypair +keypairs +keystore +keystores +linter +linux +localhost +lossy +macOS +mainnet +makefile +mdBook +mev +misconfiguration +mkcert +namespace +natively +nd +ness +nginx +nitty +oom +orging +orgs +os +paul +pem +performant +pid +pre +pubkey +pubkeys +rc +reimport +resync +roadmap +rustfmt +rustup +schemas +sigmaprime +sigp +slashable +slashings +spec'd +src +stakers +subnet +subnets +systemd +testnet +testnets +th +toml +topologies +tradeoffs +transactional +tweakers +ui +unadvanced +unaggregated +unencrypted +unfinalized +untrusted +uptimes +url +validator +validators +validator's +vc +virt +webapp +withdrawable +yaml +yml