diff --git a/.cargo/config.toml b/.cargo/config.toml index b75f6352f5b..6be194a0c74 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,11 +1,5 @@ # Zebra cargo configuration -# Disabled until we upgrade to abscissa 0.7 or later: -# https://github.com/ZcashFoundation/zebra/issues/5502 -# https://doc.rust-lang.org/cargo/reference/future-incompat-report.html -[future-incompat-report] -frequency = "never" - # Flags that apply to all Zebra crates and configurations [target.'cfg(all())'] rustflags = [ @@ -61,12 +55,6 @@ rustflags = [ # Documentation "-Wmissing_docs", - # These rustdoc -A and -W settings must be the same as the RUSTDOCFLAGS in: - # https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/lint.yml#L152 - - # Links in public docs can point to private items. - "-Arustdoc::private_intra_doc_links", - # TODOs: # `cargo fix` might help do these fixes, # or add a config.toml to sub-directories which should allow these lints, @@ -88,3 +76,12 @@ rustflags = [ # fix hidden lifetime parameters #"-Wrust_2018_idioms", ] + +[build] +rustdocflags = [ + # The -A and -W settings must be the same as the `RUSTDOCFLAGS` in: + # https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/lint.yml#L151 + + # Links in public docs can point to private items. + "-Arustdoc::private_intra_doc_links", +] diff --git a/.dockerignore b/.dockerignore index 12a78c0d76b..12057f20ac8 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,11 +1,11 @@ -# Before the docker CLI sends the context to the docker daemon, it looks for a file -# named .dockerignore in the root directory of the context. If this file exists, the -# CLI modifies the context to exclude files and directories that match patterns in it. +# Before the docker CLI sends the context to the docker daemon, it looks for a file +# named .dockerignore in the root directory of the context. If this file exists, the +# CLI modifies the context to exclude files and directories that match patterns in it. # -# You may want to specify which files to include in the context, rather than which -# to exclude. To achieve this, specify * as the first pattern, followed by one or +# You may want to specify which files to include in the context, rather than which +# to exclude. To achieve this, specify * as the first pattern, followed by one or # more ! exception patterns. 
-# +# # https://docs.docker.com/engine/reference/builder/#dockerignore-file # Exclude everything: @@ -21,3 +21,4 @@ !zebra-* !zebrad !docker/entrypoint.sh +!docker/runtime-entrypoint.sh diff --git a/.firebaserc b/.firebaserc new file mode 100644 index 00000000000..edf98286677 --- /dev/null +++ b/.firebaserc @@ -0,0 +1,21 @@ +{ + "projects": { + "default": "zfnd-prod-zebra" + }, + "targets": { + "zfnd-prod-zebra": { + "hosting": { + "docs-book": [ + "zebra-docs-book" + ], + "docs-external": [ + "zebra-docs-external" + ], + "docs-internal": [ + "zebra-docs-internal" + ] + } + } + }, + "etags": {} +} diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d53cdbc8b00..33478d31539 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -14,7 +14,7 @@ /zebrad/src/commands/start.rs @ZcashFoundation/general-rust-reviewers # Network and Async Code -/tower-batch/ @ZcashFoundation/network-reviewers +/tower-batch-control/ @ZcashFoundation/network-reviewers /tower-fallback/ @ZcashFoundation/network-reviewers /zebra-network/ @ZcashFoundation/network-reviewers /zebra-node-services/ @ZcashFoundation/network-reviewers diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 2ebcafc15b7..00000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: C-bug, S-needs-triage -assignees: '' - ---- - -## Description - - - -[short summary of the bug] - -### Steps to Reproduce - -I tried this: - -[behavior or code sample that causes the bug] - -```sh -copy and paste the exact commands or code here -``` - -### Expected Behaviour - -I expected to see this happen: [explanation] - -### Actual Behaviour - -Instead, this happened: [explanation] - -### Zebra Logs - - - -
- -``` -copy and paste the logs here -``` - -
- -## Environment - -### Zebra Version - - - -### Operating System - - diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000000..dfb6ef72758 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,73 @@ +name: '🐛 Bug report' +description: Create a report to help us improve +title: 'bug: ' +labels: [C-bug, S-needs-triage] +body: + - type: markdown + attributes: + value: | + Thank you for taking the time to report a bug in Zebra! + + Please fill out the sections below to help us reproduce and fix the bug. + If you have a question, please ask on [Discord](https://discord.gg/fP2JGmhm) or [GitHub Discussions](https://github.com/ZcashFoundation/zebra/discussions) + - type: textarea + id: what-happened + attributes: + label: What happened? + description: Also tell us, what did you expect to happen? + value: ' + I expected to see this happen: + + + Instead, this happened: + ' + validations: + required: true + - type: textarea + id: reproduce + attributes: + label: What were you doing when the issue happened? + description: Copy and paste the exact commands or code here. + placeholder: 'Behavior or code sample that causes the bug' + validations: + required: false + - type: textarea + id: logs + attributes: + label: Zebra logs + description: Copy and paste the last 100 Zebra log lines or upload the full logs to https://gist.github.com/ and add a link to them here. + placeholder: 'Copy and paste the logs here' + validations: + required: false + - type: input + id: zebrad-version + attributes: + label: Zebra Version + description: 'For bugs in `zebrad`, run `zebrad --version`.' + placeholder: 'zebrad 1.0.0-placeholder' + validations: + required: false + - type: checkboxes + id: os + attributes: + label: Which operating systems does the issue happen on? + description: You may select more than one. + options: + - label: Linux + - label: macOS + - label: Windows + - label: Other OS + - type: input + id: os-details + attributes: + label: OS details + description: 'Linux, macOS, BSD: the output of `uname -a`; Windows: version and 32-bit or 64-bit; Other OS: name and version' + validations: + required: false + - type: textarea + id: anything-else + attributes: + label: Additional information + description: Is there anything else that could help us solve this issue? + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 0086358db1e..e1816836e3b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1 +1,8 @@ -blank_issues_enabled: true +blank_issues_enabled: false +contact_links: + - name: 💬 Zcash Community Support + url: https://forum.zcashcommunity.com/ + about: You're invited to ask questions about the ecosystem, community and Zebra + - name: ❓ General Questions about Zebra + url: https://github.com/ZcashFoundation/zebra/discussions/categories/q-a + about: Please ask and answer questions about Zebra in discussion threads diff --git a/.github/ISSUE_TEMPLATE/devops_report.yml b/.github/ISSUE_TEMPLATE/devops_report.yml new file mode 100644 index 00000000000..3e5d56fe10b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/devops_report.yml @@ -0,0 +1,67 @@ +--- +name: '🚦 DevOps Report' +description: Issues related to the Zebra build, test, or release process. +title: 'devops: ' +labels: [A-devops, C-bug, S-needs-triage] +body: + - type: markdown + attributes: + value: | + Thank you for taking the time to report a bug in Zebra! 
+ + Please fill out the sections below to help us reproduce and fix the bug. + If you have a question, please ask on [Discord](https://discord.gg/fP2JGmhm) or [GitHub Discussions](https://github.com/ZcashFoundation/zebra/discussions) + - type: textarea + id: description + attributes: + label: Describe the issue or request + description: What is the problem? A clear and concise description of the bug. + validations: + required: true + - type: textarea + id: expected + attributes: + label: Expected Behavior + description: | + What did you expect to happen? + validations: + required: false + - type: textarea + id: current + attributes: + label: Current Behavior + description: | + What actually happened? + + Please include full errors, uncaught exceptions, stack traces, and relevant logs. + Links to the faulty logs in GitHub Actions or other places are also welcomed. + validations: + required: false + - type: textarea + id: solution + attributes: + label: Possible Solution + description: | + Suggest a fix/reason for the bug + validations: + required: false + - type: textarea + id: context + attributes: + label: Additional Information/Context + description: | + Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful for the community. + validations: + required: false + - type: input + id: on-prs + attributes: + label: Is this happening on PRs? + validations: + required: false + - type: input + id: on-main + attributes: + label: Is this happening on the main branch? + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 9ac8226f988..00000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -name: Change request -about: Suggest a feature or change for this project -title: '' -labels: C-enhancement, S-needs-triage -assignees: '' - ---- - -## Motivation - - - -### Specifications - - - -### Complex Code or Requirements - - - -### Testing - - - -## Related Work - - diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 00000000000..5d40057589b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,57 @@ +--- +name: "🚧 Change request" +description: Suggest a feature or change for this project +title: 'feature: ' +labels: [C-enhancement, S-needs-triage] +body: + - type: markdown + attributes: + value: | + Thank you for taking the time to suggest a feature or change for Zebra! + + Please fill out the sections below to help us understand your request. + If you have a question, please ask on [Discord](https://discord.gg/fP2JGmhm) or [GitHub Discussions](https://github.com/ZcashFoundation/zebra/discussions) + - type: textarea + id: motivation + attributes: + label: Motivation + description: | + Is your feature request related to a problem? + How does this change improve Zebra? 
+ validations: + required: true + - type: textarea + id: specs + attributes: + label: Specifications + description: | + If this change is based on consensus rules, quote them, and link to the Zcash spec or ZIP: https://zips.z.cash/#nu5-zips + If this changes network behaviour, quote and link to the Bitcoin network reference: https://developer.bitcoin.org/reference/p2p_networking.html + validations: + required: false + - type: textarea + id: complexity + attributes: + label: Complex Code or Requirements + description: | + Does this PR change concurrency, unsafe code, or complex consensus rules? + If it does, explain how we will implement, review, and test it. + validations: + required: false + - type: textarea + id: tests + attributes: + label: Testing + description: | + How can we check that this change does what we want it to do? + validations: + required: false + - type: textarea + id: related + attributes: + label: Related Work + description: | + Is this change related to other features or tickets? + validations: + required: false + diff --git a/.github/ISSUE_TEMPLATE/private_security_issue.yml b/.github/ISSUE_TEMPLATE/private_security_issue.yml index ffd7e69154d..82e6e88dce4 100644 --- a/.github/ISSUE_TEMPLATE/private_security_issue.yml +++ b/.github/ISSUE_TEMPLATE/private_security_issue.yml @@ -1,15 +1,20 @@ --- -name: Private Security Issue -about: Zebra team use only +name: '🔓 Private Security Issue' +description: Zebra team use only title: 'Security Issue #NNN' -labels: C-security, S-needs-triage -assignees: '' +labels: [C-security, S-needs-triage] +body: + - type: markdown + attributes: + value: | + This ticket is a public placeholder for a security issue that the Zebra team is fixing privately. + The issue number is chosen by our internal tracker, it is not meaningful. ---- - -## Motivation - -This ticket is a public placeholder for a security issue that the Zebra team is fixing privately. -The issue number is chosen by our internal tracker, it is not meaningful. - -Zebra developers must discuss the details of this issue using secure channels. + Zebra developers must discuss the details of this issue using secure channels. + Please do not discuss this issue in public. + - type: textarea + id: issue + attributes: + label: Description + description: | + Any relevant information about the issue diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md new file mode 100644 index 00000000000..fab41907d82 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/release.md @@ -0,0 +1,47 @@ +--- +name: "🚀 Zebra Release" +about: 'Zebra team use only' +title: 'Publish next Zebra release: (version)' +labels: 'A-release, C-trivial, P-Medium :zap:' +assignees: '' + +--- + +# Prepare for the Release + +These release steps can be done a week before the release, in separate PRs. +They can be skipped for urgent releases. + +## Checkpoints + +For performance and security, we want to update the Zebra checkpoints in every release. +- [ ] You can copy the latest checkpoints from CI by following [the zebra-checkpoints README](https://github.com/ZcashFoundation/zebra/blob/main/zebra-utils/README.md#zebra-checkpoints). + +## Missed Dependency Updates + +Sometimes `dependabot` misses some dependency updates, or we accidentally turned them off. + +This step can be skipped if there is a large pending dependency upgrade. (For example, shared ECC crates.) 
+ +Here's how we make sure we got everything: +- [ ] Run `cargo update` on the latest `main` branch, and keep the output +- [ ] If needed, [add duplicate dependency exceptions to deny.toml](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/continuous-integration.md#fixing-duplicate-dependencies-in-check-denytoml-bans) +- [ ] If needed, remove resolved duplicate dependencies from `deny.toml` +- [ ] Open a separate PR with the changes +- [ ] Add the output of `cargo update` to that PR as a comment + +# Prepare and Publish the Release + +Follow the steps in the [release checklist](https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md) to prepare the release: + +Release PR: +- [ ] Update Changelog +- [ ] Update README +- [ ] Update Zebra Versions +- [ ] Update End of Support Height + +Publish Release: +- [ ] Create & Test GitHub Pre-Release +- [ ] Publish GitHub Release +- [ ] Publish Rust Crates +- [ ] Publish Docker Images diff --git a/.github/ISSUE_TEMPLATE/usability_testing_plan.md b/.github/ISSUE_TEMPLATE/usability_testing_plan.md index 16b333eb36a..c93f413b605 100644 --- a/.github/ISSUE_TEMPLATE/usability_testing_plan.md +++ b/.github/ISSUE_TEMPLATE/usability_testing_plan.md @@ -1,5 +1,5 @@ --- -name: Usability Testing Plan +name: "📋 Usability Testing Plan" about: Create a Usability Testing Plan title: 'Usability Testing Plan' labels: C-research diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 39aee582583..d7cf1ffebb6 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -1,166 +1,132 @@ --- -name: Release Checklist Template -about: Checklist of versioning to create a taggable commit for Zebra -title: '' -labels: +name: 'Release Checklist Template' +about: 'Checklist to create and publish a Zebra release' +title: 'Release Zebra (version)' +labels: 'A-release, C-trivial, P-Critical :ambulance:' assignees: '' --- -## Versioning +# Prepare for the Release -### How to Increment Versions +- [ ] Make sure the PRs with the new checkpoint hashes and missed dependencies are already merged. + (See the release ticket checklist for details) -Zebra follows [semantic versioning](https://semver.org). Semantic versions look like: MAJOR`.`MINOR`.`PATCH[`-`TAG`.`PRE-RELEASE] -The [draft `zebrad` changelog](https://github.com/ZcashFoundation/zebra/releases) will have an automatic version bump. This version is based on [the labels on the PRs in the release](https://github.com/ZcashFoundation/zebra/blob/main/.github/release-drafter.yml). +# Summarise Release Changes -Check that the automatic `zebrad` version increment matches the changes in the release: +These steps can be done a few days before the release, in the same PR: -
- -If we're releasing a mainnet network upgrade, it is a `major` release: -1. Increment the `major` version of _*all*_ the Zebra crates. -2. Increment the `patch` version of the tower crates. - -If we're not releasing a mainnet network upgrade, check for features, major changes, deprecations, and removals. If this release has any, it is a `minor` release: -1. Increment the `minor` version of `zebrad`. -2. Increment the `pre-release` version of the other crates. -3. Increment the `patch` version of the tower crates. - -Otherwise, it is a `patch` release: -1. Increment the `patch` version of `zebrad`. -2. Increment the `pre-release` version of the other crates. -3. Increment the `patch` version of the tower crates. - -Zebra's Rust API is not stable or supported, so we keep all the crates on the same beta `pre-release` version. - -
- -### Version Locations - -Once you know which versions you want to increment, you can find them in the: - -zebrad (rc): -- [ ] zebrad `Cargo.toml` -- [ ] `README.md` -- [ ] `book/src/user/docker.md` - -crates (beta): -- [ ] zebra-* `Cargo.toml`s - -tower (patch): -- [ ] tower-* `Cargo.toml`s - -auto-generated: -- [ ] `Cargo.lock`: run `cargo build` after updating all the `Cargo.toml`s - -#### Version Tooling +## Change Log -You can use `fastmod` to interactively find and replace versions. +**Important**: Any merge into `main` deletes any edits to the draft changelog. +Once you are ready to tag a release, copy the draft changelog into `CHANGELOG.md`. -For example, you can do something like: -``` -fastmod --extensions rs,toml,md --fixed-strings '1.0.0-rc.0' '1.0.0-rc.1' zebrad README.md zebra-network/src/constants.rs book/src/user/docker.md -fastmod --extensions rs,toml,md --fixed-strings '1.0.0-beta.15' '1.0.0-beta.16' zebra-* -fastmod --extensions rs,toml,md --fixed-strings '0.2.30' '0.2.31' tower-batch tower-fallback -cargo build -``` +We use [the Release Drafter workflow](https://github.com/marketplace/actions/release-drafter) to automatically create a [draft changelog](https://github.com/ZcashFoundation/zebra/releases). We follow the [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format. -If you use `fastmod`, don't update versions in `CHANGELOG.md` or `zebra-dependencies-for-audit.md`. +To create the final change log: +- [ ] Copy the **latest** draft changelog into `CHANGELOG.md` (there can be multiple draft releases) +- [ ] Delete any trivial changes + - [ ] Put the list of deleted changelog entries in a PR comment to make reviewing easier +- [ ] Combine duplicate changes +- [ ] Edit change descriptions so they will make sense to Zebra users +- [ ] Check the category for each change + - Prefer the "Fix" category if you're not sure ## README +README updates can be skipped for urgent releases. + Update the README to: -- [ ] Remove any "Known Issues" that have been fixed +- [ ] Remove any "Known Issues" that have been fixed since the last release. - [ ] Update the "Build and Run Instructions" with any new dependencies. Check for changes in the `Dockerfile` since the last tag: `git diff docker/Dockerfile`. - [ ] If Zebra has started using newer Rust language features or standard library APIs, update the known working Rust version in the README, book, and `Cargo.toml`s You can use a command like: ```sh - fastmod --fixed-strings '1.58' '1.65' +fastmod --fixed-strings '1.58' '1.65' ``` -## Checkpoints +## Create the Release PR -For performance and security, we want to update the Zebra checkpoints in every release. -You can copy the latest checkpoints from CI by following [the zebra-checkpoints README](https://github.com/ZcashFoundation/zebra/blob/main/zebra-utils/README.md#zebra-checkpoints). +- [ ] Push the updated changelog and README into a new branch + for example: `bump-v1.0.0` - this needs to be different to the tag name +- [ ] Create a release PR by adding `&template=release-checklist.md` to the comparing url ([Example](https://github.com/ZcashFoundation/zebra/compare/bump-v1.0.0?expand=1&template=release-checklist.md)). +- [ ] Freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. +- [ ] Mark all the release PRs as `Critical` priority, so they go in the `urgent` Mergify queue. 
+- [ ] Mark all non-release PRs with `do-not-merge`, because Mergify checks approved PRs against every commit, even when a queue is frozen. -## Missed Dependency Updates -Sometimes `dependabot` misses some dependency updates, or we accidentally turned them off. +# Update Versions and End of Support -Here's how we make sure we got everything: -- [ ] Run `cargo update` on the latest `main` branch, and keep the output -- [ ] If needed, update [deny.toml](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/continuous-integration.md#fixing-duplicate-dependencies-in-check-denytoml-bans) -- [ ] Open a separate PR with the changes, and add the output of `cargo update` to that PR as a comment +## Update Zebra Version -## Change Log +### Choose a Release Level -**Important**: Any merge into `main` deletes any edits to the draft changelog. -Once you are ready to tag a release, copy the draft changelog into `CHANGELOG.md`. +Zebra follows [semantic versioning](https://semver.org). Semantic versions look like: MAJOR.MINOR.PATCH[-TAG.PRE-RELEASE] -We use [the Release Drafter workflow](https://github.com/marketplace/actions/release-drafter) to automatically create a [draft changelog](https://github.com/ZcashFoundation/zebra/releases). We follow the [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format. +Choose a release level for `zebrad`. Release levels are based on user-visible changes from the changelog: +- Mainnet Network Upgrades are `major` releases +- significant new features or behaviour changes; changes to RPCs, command-line, or configs; and deprecations or removals are `minor` releases +- otherwise, it is a `patch` release -To create the final change log: -- [ ] Copy the **latest** draft changelog into `CHANGELOG.md` (there can be multiple draft releases) -- [ ] Delete any trivial changes. Keep the list of those, to include in the PR -- [ ] Combine duplicate changes -- [ ] Edit change descriptions so they are consistent, and make sense to non-developers -- [ ] Check the category for each change - - Prefer the "Fix" category if you're not sure +Zebra's Rust API doesn't have any support or stability guarantees, so we keep all the `zebra-*` and `tower-*` crates on a beta `pre-release` version. + +### Update Crate Versions
-#### Change Categories +If you're publishing crates for the first time, click this triangle for extra steps -From "Keep a Changelog": -* `Added` for new features. -* `Changed` for changes in existing functionality. -* `Deprecated` for soon-to-be removed features. -* `Removed` for now removed features. -* `Fixed` for any bug fixes. -* `Security` in case of vulnerabilities. +- [ ] Install `cargo-release`: `cargo install cargo-release` +- [ ] Make sure you are an owner of the crate or [a member of the Zebra crates.io `owners` group on GitHub](https://github.com/orgs/ZcashFoundation/teams/owners)
-## Release support constants +Check that the release will work: +- [ ] Update crate versions, commit the changes to the release branch, and do a release dry-run: -Needed for the end of support feature. Please update the following constants [in this file](https://github.com/ZcashFoundation/zebra/blob/main/zebrad/src/components/sync/end_of_support.rs): +```sh +cargo release version --verbose --execute --allow-branch '*' --workspace --exclude zebrad beta +cargo release version --verbose --execute --allow-branch '*' --package zebrad patch # [ major | minor | patch ] +cargo release replace --verbose --execute --allow-branch '*' --package zebrad +cargo release commit --verbose --execute --allow-branch '*' +``` -- [ ] `ESTIMATED_RELEASE_HEIGHT` (required) - Replace with the estimated height you estimate the release will be tagged. -
- - Find where the Zcash blockchain tip is now by using a [Zcash explorer](https://zcashblockexplorer.com/blocks) or other tool. - - Consider there are aprox `1152` blocks per day (with the current Zcash `75` seconds spacing). - - So for example if you think the release will be tagged somewhere in the next 3 days you can add `1152 * 3` to the current tip height and use that value here. -
+Crate publishing is [automatically checked in CI](https://github.com/ZcashFoundation/zebra/actions/workflows/release-crates-io.yml) using "dry run" mode. -- [ ] `EOS_PANIC_AFTER` (optional) - Replace if you want the release to be valid for a different numbers of days into the future. The default here is 120 days. +## Update End of Support -## Create the Release +The end of support height is calculated from the current blockchain height: +- [ ] Find where the Zcash blockchain tip is now by using a [Zcash explorer](https://zcashblockexplorer.com/blocks) or other tool. +- [ ] Replace `ESTIMATED_RELEASE_HEIGHT` in [`end_of_support.rs`](https://github.com/ZcashFoundation/zebra/blob/main/zebrad/src/components/sync/end_of_support.rs) with the height you estimate the release will be tagged. -### Create the Release PR +
-After you have the version increments, the updated checkpoints, any missed dependency updates, -and the updated changelog: +Optional: calculate the release tagging height -- [ ] Make sure the PRs with the new checkpoint hashes and missed dependencies are already merged -- [ ] Push the version increments, the updated changelog and the release constants into a branch - (for example: `bump-v1.0.0-rc.0` - this needs to be different to the tag name) -- [ ] Create a release PR by adding `&template=release-checklist.md` to the comparing url ([Example](https://github.com/ZcashFoundation/zebra/compare/v1.0.0-rc.0-release?expand=1&template=release-checklist.md)). - - [ ] Add the list of deleted changelog entries as a comment to make reviewing easier. -- [ ] Freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. -- [ ] Mark all the release PRs as `Critical` priority, so they go in the `urgent` Mergify queue. +- Add `1152` blocks for each day until the release +- For example, if the release is in 3 days, add `1152 * 3` to the current Mainnet block height + +
+ +## Update the Release PR + +- [ ] Push the version increments and the release constants to the release branch. + + +# Publish the Zebra Release -### Create the Release +## Create the GitHub Pre-Release -- [ ] Once the PR has been merged, create a new release using the draft release as a base, by clicking the Edit icon in the [draft release](https://github.com/ZcashFoundation/zebra/releases) +- [ ] Wait for all the release PRs to be merged +- [ ] Create a new release using the draft release as a base, by clicking the Edit icon in the [draft release](https://github.com/ZcashFoundation/zebra/releases) - [ ] Set the tag name to the version tag, - for example: `v1.0.0-rc.0` + for example: `v1.0.0` - [ ] Set the release to target the `main` branch - [ ] Set the release title to `Zebra ` followed by the version tag, - for example: `Zebra 1.0.0-rc.0` + for example: `Zebra 1.0.0` - [ ] Replace the prepopulated draft changelog in the release description with the final changelog you created; starting just _after_ the title `## [Zebra ...` of the current version being released, and ending just _before_ the title of the previous release. @@ -168,25 +134,28 @@ and the updated changelog: - [ ] Publish the pre-release to GitHub using "Publish Release" - [ ] Delete all the [draft releases from the list of releases](https://github.com/ZcashFoundation/zebra/releases) -## Binary Testing +## Test the Pre-Release - [ ] Wait until the [Docker binaries have been built on `main`](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-integration-docker.yml), and the quick tests have passed. - (You can ignore the full sync and `lightwalletd` tests, because they take about a day to run.) - [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/continous-delivery.yml) -- [ ] [Publish the release to GitHub](https://github.com/ZcashFoundation/zebra/releases) by disabling 'pre-release', then clicking "Set as the latest release" -- [ ] Wait until [the Docker images have been published](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml) -- [ ] Test the Docker image using `docker run --tty --interactive zfnd/zebra:1.0.0-rc.`, - and put the output in a comment on the PR - -- [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. +## Publish Release + +- [ ] [Publish the release to GitHub](https://github.com/ZcashFoundation/zebra/releases) by disabling 'pre-release', then clicking "Set as the latest release" -## Telling Zebra Users +## Publish Crates -- [ ] Post a summary of the important changes in the release in the `#arborist` and `#communications` Slack channels +- [ ] Run `cargo login` +- [ ] Run `cargo clean` in the zebra repo (optional) +- [ ] Publish the crates to crates.io: `cargo release publish --verbose --workspace --execute` +- [ ] Check that Zebra can be installed from `crates.io`: + `cargo install --locked --force --version 1.minor.patch zebrad && ~/.cargo/bin/zebrad` + and put the output in a comment on the PR. -If the release contains new features (`major` or `minor`), or high-priority bug fixes: -- [ ] Ask the team about doing a blog post +## Publish Docker Images +- [ ] Wait for the [the Docker images to be published successfully](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml). 
+- [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify. +- [ ] Remove `do-not-merge` from the PRs you added it to. ## Release Failures @@ -194,10 +163,12 @@ If building or running fails after tagging: 
+Tag a new release, following these instructions... + 1. Fix the bug that caused the failure -2. Increment versions again, following these instructions from the start -3. Update the code and documentation with a **new** git tag +2. Start a new `patch` release +3. Skip the **Release Preparation**, and start at the **Release Changes** step 4. Update `CHANGELOG.md` with details about the fix -5. Tag a **new** release +5. Follow the release checklist for the new Zebra version
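If you need to restart like this, a minimal sketch of step 2, reusing the `cargo release` commands from the dry-run section of this checklist (the branch name is illustrative):

```sh
# Illustrative branch name: it must stay different to the new tag name
git checkout -b bump-v1.0.1

# Keep the workspace crates on their beta pre-release versions,
# then bump only zebrad by a patch level
cargo release version --verbose --execute --allow-branch '*' --workspace --exclude zebrad beta
cargo release version --verbose --execute --allow-branch '*' --package zebrad patch
cargo release replace --verbose --execute --allow-branch '*' --package zebrad
cargo release commit --verbose --execute --allow-branch '*'
```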
diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 8f810e4b3c4..86bbd3eceec 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -2,21 +2,133 @@ version: 2 updates: - package-ecosystem: cargo directory: '/' + # serde, clap, and other dependencies sometimes have multiple updates in a week schedule: - interval: daily + interval: weekly + day: monday timezone: America/New_York - open-pull-requests-limit: 10 + # Limit dependabot to 1 PR per reviewer + open-pull-requests-limit: 6 labels: - 'C-trivial' - 'A-rust' - 'A-dependencies' - 'P-Low :snowflake:' + groups: + ecc: + patterns: + # deliberately include zcash_script (even though it is maintained by ZF) + - "zcash_*" + - "orchard" + - "halo2*" + - "incrementalmerkletree" + - "equihash" + # addresses + - "bs58" + - "ripemd" + # groups are limited to 10 items + crypto: + patterns: + - "bellman" + # reddsa, redjubjub + - "red*" + - "jubjub" + - "group" + - "bls12_381" + - "blake*" + - "secp256k1" + - "sha2" + - "*25519*" + - "rand*" + async: + patterns: + - "tokio*" + - "console-subscriber" + - "tower*" + - "hyper*" + - "h2" + - "reqwest" + - "futures*" + - "pin-project*" + log: + patterns: + - "tracing*" + - "log" + - "*eyre*" + - "thiserror" + - "displaydoc" + - "spandoc" + - "owo-colors" + - "sentry*" + - "metrics*" + - "inferno" + concurrency: + patterns: + - "once_cell" + - "lazy_static" + - "rayon*" + - "crossbeam*" + - "num_cpus" + progress-bar: + patterns: + - "indicatif" + - "howudoin" + time: + patterns: + - "chrono*" + - "time*" + - "humantime*" + app: + patterns: + - "abscissa*" + - "structopt*" + - "clap*" + - "atty*" + - "semver*" + # dirs, directories, directories-next + - "dir*" + - "vergen" + - "*git*" + - "toml*" + - "rlimit" + formats: + patterns: + - "serde*" + - "jsonrpc*" + - "hex*" + - "regex" + - "byteorder" + - "bytes" + - "bincode" + data-structures: + patterns: + - "bitflags*" + - "bitvec" + - "indexmap" + - "num-integer" + - "primitive-types" + - "uint" + - "tinyvec" + - "itertools" + - "ordered-map" + - "mset" + test: + patterns: + - "proptest*" + - "insta" + - "prost*" + - "tonic*" + - "tempfile" + - "static_assertions" + - "criterion" - package-ecosystem: github-actions directory: '/' schedule: - interval: daily + # tj-actions/changed-files often updates daily, which is too much for us + interval: weekly + day: wednesday timezone: America/New_York - open-pull-requests-limit: 10 + open-pull-requests-limit: 4 labels: - 'C-trivial' - 'A-devops' diff --git a/.github/mergify.yml b/.github/mergify.yml index 11cf9aeecf2..a0a5b7e282d 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -29,34 +29,67 @@ queue_rules: conditions: - base=main +# These rules are checked in order, the first one to be satisfied applies pull_request_rules: - - name: move to urgent queue when CI passes with 1 review and not WIP targeting main + - name: move to urgent queue when CI passes with multiple reviews conditions: - # This queue handles a PR if: - # - it targets main - # - is not in draft - # - does not include the do-not-merge label - # - is labeled with Critical priority + # This queue handles a PR if it: + # has multiple approving reviewers + - "#approved-reviews-by>=2" + # is labeled with Critical priority + - 'label~=^P-Critical' + # and satisfies the standard merge conditions: + # targets main - base=main + # is not in draft - -draft + # does not include the do-not-merge label - label!=do-not-merge + actions: + queue: + name: urgent + method: squash + + - name: move to urgent queue when CI passes 
with 1 review + conditions: + # This queue handles a PR if it: + # has at least one approving reviewer (branch protection rule) + # does not need extra reviews + - 'label!=extra-reviews' + # is labeled with Critical priority - 'label~=^P-Critical' + # and satisfies the standard merge conditions: + - base=main + - -draft + - label!=do-not-merge actions: queue: name: urgent method: squash - - name: move to batched queue when CI passes with 1 review and not WIP targeting main + - name: move to batched queue when CI passes with multiple reviews + conditions: + # This queue handles a PR if it: + # has multiple approving reviewers + - "#approved-reviews-by>=2" + # is labeled with any other priority (rules are checked in order) + # and satisfies the standard merge conditions: + - base=main + - -draft + - label!=do-not-merge + actions: + queue: + name: batched + method: squash + + - name: move to batched queue when CI passes with 1 review conditions: - # This queue handles a PR if: - # - it targets main - # - is not in draft - # - does not include the do-not-merge label - # - is labeled with any other priority except Critical, or does not have a priority label, - # including automated dependabot PRs. - # - # We don't need to check priority labels here, because the rules are evaluated in order: - # https://docs.mergify.com/configuration/#pull-request-rules + # This queue handles a PR if it: + # has at least one approving reviewer (branch protection rule) + # does not need extra reviews + - 'label!=extra-reviews' + # is labeled with any other priority (rules are checked in order) + # and satisfies the standard merge conditions: - base=main - -draft - label!=do-not-merge diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml index 65e55e76cf3..f291980b693 100644 --- a/.github/release-drafter.yml +++ b/.github/release-drafter.yml @@ -11,34 +11,33 @@ autolabeler: - '/secur/i' title: - '/secur/i' - - label: 'C-removed' + - '/crash/i' + - '/destr/i' + - '/unsafe/i' + - label: 'C-deprecated' branch: - - '/remov/i' + - '/deprecat/i' title: - - '/remov/i' - - label: 'C-deprecated' + - '/deprecat/i' + - label: 'extra-reviews' branch: + - '/remov/i' - '/deprecat/i' title: + - '/remov/i' - '/deprecat/i' + - '/crash/i' + - '/destr/i' + - '/unsafe/i' - label: 'C-feature' branch: - - '/add/i' - '/feat/i' title: - - '/add/i' - '/feat/i' - - label: 'C-enhancement' - branch: - - '/chang/i' - title: - - '/chang/i' - label: 'C-bug' branch: - - '/fix/i' - '/bug/i' title: - - '/fix/i' - '/bug/i' # Changes that are almost always trivial for users - label: 'C-trivial' @@ -46,16 +45,24 @@ autolabeler: - '/clean/i' - '/chore/i' - '/clippy/i' + - '/test/i' title: - '/clean/i' - '/chore/i' - '/clippy/i' + - '/test/i' + - '/(ci)/i' + - '/(cd)/i' + - '/job/i' + - '/patch/i' + - '/actions/i' files: # Regular changes that don't need to go in the CHANGELOG - 'CHANGELOG.md' - 'zebra-consensus/src/checkpoint/*-checkpoints.txt' # Developer-only changes - '.gitignore' + - '.dockerignore' # Test-only changes - 'zebra-test' - '.cargo/config.toml' @@ -80,8 +87,7 @@ categories: labels: - 'C-security' # Other labels that are usually security issues - - 'I-bad-code' - - 'I-bad-data' + - 'I-invalid-data' - 'I-consensus' - 'I-crash' - 'I-destructive' @@ -90,11 +96,10 @@ categories: - 'I-privacy' - 'I-remote-node-overload' - 'I-unbounded-growth' - - 'I-unsound' + - 'I-memory-safety' - title: 'Removed' labels: - 'C-removal' - - 'C-breaking' - title: 'Deprecated' labels: - 'C-deprecation' @@ -164,9 +169,9 @@ template: | ### 
Breaking Changes This release has the following breaking changes: - - *TODO*: Check the `Removed` section for any breaking changes + - *TODO*: Check the `Removed` and `Deprecated` sections for any breaking changes - *TODO*: Add a short description of the user impact of each breaking change, and any actions users need to take - + $CHANGES ### Contributors diff --git a/.github/workflows/build-crates-individually.patch.yml b/.github/workflows/build-crates-individually.patch.yml index ef5b6f01c8f..ad957e8521e 100644 --- a/.github/workflows/build-crates-individually.patch.yml +++ b/.github/workflows/build-crates-individually.patch.yml @@ -23,7 +23,7 @@ jobs: outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 # Setup Rust with stable toolchain and minimal profile - name: Setup Rust diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/build-crates-individually.yml index 3c68261c57d..117585ff80e 100644 --- a/.github/workflows/build-crates-individually.yml +++ b/.github/workflows/build-crates-individually.yml @@ -50,8 +50,8 @@ jobs: outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@v3.5.2 - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: actions/checkout@v3.5.3 + - uses: r7kamura/rust-problem-matchers@v1.4.0 # Setup Rust with stable toolchain and minimal profile - name: Setup Rust @@ -106,16 +106,16 @@ jobs: matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} # Setup Rust with stable toolchain and minimal profile diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 3b6d997b593..a684dd097ef 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -21,12 +21,6 @@ on: type: string workflow_call: inputs: - network: - required: false - type: string - checkpoint_sync: - required: false - type: boolean image_name: required: true type: string @@ -49,12 +43,6 @@ on: rust_lib_backtrace: required: false type: string - colorbt_show_hidden: - required: false - type: string - zebra_skip_ipv6_tests: - required: false - type: string rust_log: required: false type: string @@ -63,18 +51,20 @@ on: # https://github.com/ZcashFoundation/zebra/blob/main/docker/Dockerfile#L83 features: required: false - default: "sentry" + default: "default-release-binaries" type: string test_features: required: false default: "lightwalletd-grpc-tests zebra-checkpoints" type: string - rpc_port: - required: false - type: string tag_suffix: required: false type: string + no_cache: + description: 'Disable the Docker cache for this build' + required: false + type: boolean + default: false outputs: image_digest: @@ -93,10 +83,10 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short 
variables uses: rlespinasse/github-slug-action@v4 @@ -106,12 +96,12 @@ jobs: # Automatic tag management and OCI Image Format Specification for labels - name: Docker meta id: meta - uses: docker/metadata-action@v4.4.0 + uses: docker/metadata-action@v4.6.0 with: # list of Docker images to use as base name for tags images: | us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra/${{ inputs.image_name }} - zfnd/zebra,enable=${{ github.event_name == 'release' && !github.event.release.prerelease }} + zfnd/${{ inputs.image_name }},enable=${{ github.event_name == 'release' && !github.event.release.prerelease }} # appends inputs.tag_suffix to image tags/names flavor: | suffix=${{ inputs.tag_suffix }} @@ -148,7 +138,7 @@ jobs: access_token_lifetime: 10800s - name: Login to Google Artifact Registry - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v2.2.0 with: registry: us-docker.pkg.dev username: oauth2accesstoken @@ -158,7 +148,7 @@ jobs: # We only publish images to DockerHub if a release is not a pre-release # Ref: https://github.com/orgs/community/discussions/26281#discussioncomment-3251177 if: ${{ github.event_name == 'release' && !github.event.release.prerelease }} - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v2.2.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} @@ -166,7 +156,7 @@ jobs: # Build and push image to Google Artifact Registry, and possibly DockerHub - name: Build & push id: docker_build - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v4.1.1 with: target: ${{ inputs.dockerfile_target }} context: ${{ inputs.docker_context || '.' }} @@ -174,25 +164,20 @@ jobs: tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} build-args: | - NETWORK=${{ inputs.network }} SHORT_SHA=${{ env.GITHUB_SHA_SHORT }} - RUST_BACKTRACE=${{ inputs.rust_backtrace }} - RUST_LIB_BACKTRACE=${{ inputs.rust_lib_backtrace }} - COLORBT_SHOW_HIDDEN=${{ inputs.colorbt_show_hidden }} - ZEBRA_SKIP_IPV6_TESTS=${{ inputs.zebra_skip_ipv6_tests }} - CHECKPOINT_SYNC=${{ inputs.checkpoint_sync }} RUST_LOG=${{ inputs.rust_log }} FEATURES=${{ inputs.features }} TEST_FEATURES=${{ inputs.test_features }} - RPC_PORT=${{ inputs.rpc_port }} push: true + # Don't read from the cache if the caller disabled it. + # https://docs.docker.com/engine/reference/commandline/buildx_build/#options + no-cache: ${{ inputs.no_cache }} # To improve build speeds, for each branch we push an additional image to the registry, # to be used as the caching layer, using the `max` caching mode. # - # We use multiple cache sources to confirm a cache hit, starting from a per-branch cache, - # and if there's no hit, then continue with the `main` branch. When changes are added to a PR, - # they are usually smaller than the diff between the PR and `main` branch. So this provides the - # best performance. + # We use multiple cache sources to confirm a cache hit, starting from a per-branch cache. + # If there's no hit, we continue with a `main` branch cache, which helps us avoid + # rebuilding cargo-chef, most dependencies, and possibly some Zebra crates. 
# # The caches are tried in top-down order, the first available cache is used: # https://github.com/moby/moby/pull/26839#issuecomment-277383550 diff --git a/.github/workflows/continous-delivery.patch.yml b/.github/workflows/continous-delivery.patch.yml new file mode 100644 index 00000000000..f51ef601468 --- /dev/null +++ b/.github/workflows/continous-delivery.patch.yml @@ -0,0 +1,35 @@ +name: CD + +on: + # Only patch the Docker image test jobs + pull_request: + paths-ignore: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.dockerignore' + - '.github/workflows/continous-delivery.yml' + - '.github/workflows/find-cached-disks.yml' + + +jobs: + build: + name: Build CD Docker / Build images + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-configuration-file: + name: Test Zebra CD Docker config file + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 5a91c4f7db8..35dda6a94b0 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -2,10 +2,16 @@ name: CD # Ensures that only one workflow task will run at a time. Previous deployments, if # already in process, won't get cancelled. Instead, we let the first to complete -# then queue the latest pending workflow, cancelling any workflows in between +# then queue the latest pending workflow, cancelling any workflows in between. +# +# Since the different event types each use a different Managed Instance Group or instance, +# we can run different event types concurrently. +# +# For pull requests, we only run the tests from this workflow, and don't do any deployments. +# So an in-progress pull request gets cancelled, just like other tests. concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: false + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} on: workflow_dispatch: @@ -14,17 +20,60 @@ on: default: 'Mainnet' description: 'Network to deploy: Mainnet or Testnet' required: true - checkpoint_sync: - default: 'true' - description: 'Use as many checkpoints as possible when syncing' - required: true - push: - branches: - - main + log_file: + default: '' + description: 'Log to a file path rather than standard output' + no_cache: + description: 'Disable the Docker cache for this build' + required: false + type: boolean + default: false + + # Temporarily disabled to reduce network load, see #6894. 
+ #push: + # branches: + # - main + # paths: + # # code and tests + # - '**/*.rs' + # # hard-coded checkpoints and proptest regressions + # - '**/*.txt' + # # dependencies + # - '**/Cargo.toml' + # - '**/Cargo.lock' + # # configuration files + # - '.cargo/config.toml' + # - '**/clippy.toml' + # # workflow definitions + # - 'docker/**' + # - '.dockerignore' + # - '.github/workflows/continous-delivery.yml' + # - '.github/workflows/build-docker-image.yml' + + # Only runs the Docker image tests, doesn't deploy any instances + pull_request: + paths: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.dockerignore' + - '.github/workflows/continous-delivery.yml' + - '.github/workflows/find-cached-disks.yml' + release: types: - published + jobs: # If a release was made we want to extract the first part of the semver from the # tag_name @@ -64,60 +113,120 @@ jobs: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime image_name: zebrad - # We need to hard-code Mainnet here, because env is not allowed in this context - network: ${{ inputs.network || 'Mainnet' }} - checkpoint_sync: true - rust_backtrace: '1' - zebra_skip_ipv6_tests: '1' + no_cache: ${{ inputs.no_cache || false }} rust_log: info - # Test that Zebra works using the default config with the latest Zebra version + # Test that Zebra works using the default config with the latest Zebra version. test-configuration-file: - name: Test Zebra default Docker config file - timeout-minutes: 5 + name: Test Zebra CD Docker config file + timeout-minutes: 15 runs-on: ubuntu-latest needs: build steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 with: short-length: 7 + # Make sure Zebra can sync at least one full checkpoint on mainnet - name: Run tests using the default config run: | set -ex docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} docker run --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - EXIT_STATUS=$(docker logs --tail all --follow default-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'estimated progress to chain tip.*BeforeOverwinter'; echo $?; ) + # show the logs, even if the job times out + docker logs --tail all --follow default-conf-tests | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + 'net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter' docker stop default-conf-tests + # get the exit status from docker + EXIT_STATUS=$( \ + docker wait default-conf-tests || \ + docker inspect --format "{{.State.ExitCode}}" default-conf-tests || \ + echo "missing container, or missing exit status for container" \ + ) docker logs default-conf-tests + echo "docker exit status: $EXIT_STATUS" + if [[ "$EXIT_STATUS" = "137" ]]; then + echo "ignoring expected signal status" + exit 0 + fi exit "$EXIT_STATUS" - # This jobs handles the deployment of a Managed Instance Group (MiG) with 2 nodes in - # the us-central1 region. Two different groups of MiGs are deployed one for pushes to - # the main branch and another for version releases of Zebra + # Test reconfiguring the docker image for testnet. 
+ test-configuration-file-testnet: + name: Test testnet Zebra CD Docker config file + timeout-minutes: 15 + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Make sure Zebra can sync the genesis block on testnet + - name: Run tests using a testnet config + run: | + set -ex + docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + docker run --env "NETWORK=Testnet" --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + # show the logs, even if the job times out + docker logs --tail all --follow testnet-conf-tests | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + -e 'net.*=.*Test.*estimated progress to chain tip.*Genesis' \ + -e 'net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter' + docker stop testnet-conf-tests + # get the exit status from docker + EXIT_STATUS=$( \ + docker wait testnet-conf-tests || \ + docker inspect --format "{{.State.ExitCode}}" testnet-conf-tests || \ + echo "missing container, or missing exit status for container" \ + ) + docker logs testnet-conf-tests + echo "docker exit status: $EXIT_STATUS" + if [[ "$EXIT_STATUS" = "137" ]]; then + echo "ignoring expected signal status" + exit 0 + fi + exit "$EXIT_STATUS" + + # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet, + # with one node in the configured GCP region. # - # Once this workflow is triggered the previous MiG is replaced, on pushes to main its - # always replaced, and with releases its only replaced if the same major version is - # being deployed, otherwise a new major version is deployed + # Separate Mainnet and Testnet MiGs are deployed whenever there are: + # - pushes to the main branch, or + # - version releases of Zebra. + # + # Once this workflow is triggered: + # - by pushes to main: the MiG is always replaced, + # - by releases: the MiG is only replaced if the same major version is being deployed, + # otherwise a new major version is deployed in a new MiG. # # Runs: # - on every push/merge to the `main` branch # - on every release, when it's published deploy-nodes: - name: Deploy ${{ inputs.network || 'Mainnet' }} nodes + strategy: + matrix: + network: [Mainnet, Testnet] + name: Deploy ${{ matrix.network }} nodes needs: [ build, test-configuration-file, versioning ] runs-on: ubuntu-latest - timeout-minutes: 30 + timeout-minutes: 60 permissions: contents: 'read' id-token: 'write' if: ${{ !cancelled() && !failure() && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release') }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false @@ -129,12 +238,12 @@ jobs: # Makes the Zcash network name lowercase. # # Labels in GCP are required to be in lowercase, but the blockchain network - # uses sentence case, so we need to downcase ${{ inputs.network || 'Mainnet' }}. + # uses sentence case, so we need to downcase the network. # # Passes the lowercase network to subsequent steps using $NETWORK env variable. 
- name: Downcase network name for labels run: | - NETWORK_CAPS="${{ inputs.network || 'Mainnet' }}" + NETWORK_CAPS="${{ matrix.network }}" echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" # Setup gcloud CLI @@ -151,15 +260,21 @@ jobs: # TODO we should implement the fixes from https://github.com/ZcashFoundation/zebra/pull/5670 here # but the implementation is failing as it's requiring the disk names, contrary to what is stated in the official documentation - - name: Create instance template + - name: Create instance template for ${{ matrix.network }} run: | - gcloud compute instance-templates create-with-container zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + gcloud compute instance-templates create-with-container zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ + --boot-disk-size 300GB \ --boot-disk-type=pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ + --user-output-enabled \ + --metadata google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \ + --container-stdin \ + --container-tty \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ - --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }},auto-delete=yes,size=300GB,type=pd-ssd \ - --container-mount-disk=mount-path="/zebrad-cache",name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }} \ + --container-env "NETWORK=${{ matrix.network }},LOG_FILE=${{ vars.CD_LOG_FILE }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }}" \ + --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},device-name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd,mode=rw \ + --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},mode=rw \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ @@ -168,34 +283,34 @@ jobs: --tags zebrad # Check if our destination instance group exists already - - name: Check if instance group exists + - name: Check if ${{ matrix.network }} instance group exists id: does-group-exist continue-on-error: true run: | - gcloud compute instance-groups list | grep "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" | grep "${{ vars.GCP_REGION }}" + gcloud compute instance-groups list | grep "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${NETWORK}" | grep "${{ vars.GCP_REGION }}" # Deploy new managed instance group using the new instance template - - name: Create managed instance group + - name: Create managed instance group for ${{ matrix.network }} if: steps.does-group-exist.outcome == 'failure' run: | gcloud compute instance-groups managed create \ - "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" \ - --template "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ + "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${NETWORK}" \ + --template "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \ --health-check zebrad-tracing-filter \ --initial-delay 30 \ --region "${{ vars.GCP_REGION }}" \ --size 1 # Rolls out update to existing group using the new 
instance template
-      - name: Update managed instance group
+      - name: Update managed instance group for ${{ matrix.network }}
         if: steps.does-group-exist.outcome == 'success'
         run: |
           gcloud compute instance-groups managed rolling-action start-update \
-          "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" \
-          --version template="zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
+          "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${NETWORK}" \
+          --version template="zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \
           --region "${{ vars.GCP_REGION }}"

-  # This jobs handles the deployment of a single node (1) in the us-central1-a zone
+  # This job handles the deployment of a single node (1) in the configured GCP zone
   # when an instance is required to test a specific commit
   #
   # Runs:
@@ -203,7 +318,7 @@ jobs:
   #
   # Note: these instances are not automatically replaced or deleted
   deploy-instance:
-    name: Deploy single instance
+    name: Deploy single ${{ inputs.network }} instance
     needs: [ build, test-configuration-file ]
     runs-on: ubuntu-latest
     timeout-minutes: 30
@@ -213,7 +328,7 @@ jobs:
     if: github.event_name == 'workflow_dispatch'

    steps:
-      - uses: actions/checkout@v3.5.2
+      - uses: actions/checkout@v3.5.3
         with:
           persist-credentials: false

@@ -222,6 +337,17 @@ jobs:
         with:
           short-length: 7

+      # Makes the Zcash network name lowercase.
+      #
+      # Labels in GCP are required to be in lowercase, but the blockchain network
+      # uses sentence case, so we need to downcase the network.
+      #
+      # Passes the lowercase network to subsequent steps using $NETWORK env variable.
+      - name: Downcase network name for labels
+        run: |
+          NETWORK_CAPS="${{ inputs.network }}"
+          echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV"
+
       # Setup gcloud CLI
       - name: Authenticate to Google Cloud
         id: auth
@@ -235,19 +361,21 @@ jobs:
         uses: google-github-actions/setup-gcloud@v1.1.1

       # Create instance template from container image
-      - name: Manual deploy of a single instance running zebrad
+      - name: Manual deploy of a single ${{ inputs.network }} instance running zebrad
         run: |
-          gcloud compute instances create-with-container "zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
+          gcloud compute instances create-with-container "zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \
           --boot-disk-size 300GB \
           --boot-disk-type=pd-ssd \
           --image-project=cos-cloud \
           --image-family=cos-stable \
+          --user-output-enabled \
+          --metadata google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \
+          --container-stdin \
+          --container-tty \
           --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \
-          --create-disk=auto-delete=yes,size=300GB,type=pd-ssd \
-          --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }},auto-delete=yes,size=300GB,type=pd-ssd \
-          --container-mount-disk=mount-path='/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }} \
+          --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }}" \
+          --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},device-name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd,mode=rw \
+          --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT
}}-${NETWORK},mode=rw \ --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ diff --git a/.github/workflows/continous-integration-docker.patch.yml b/.github/workflows/continous-integration-docker.patch.yml index b3cf3a8b537..489d75b01b3 100644 --- a/.github/workflows/continous-integration-docker.patch.yml +++ b/.github/workflows/continous-integration-docker.patch.yml @@ -19,8 +19,10 @@ on: - '**/clippy.toml' # workflow definitions - 'docker/**' + - '.dockerignore' - '.github/workflows/continous-integration-docker.yml' - '.github/workflows/deploy-gcp-tests.yml' + - '.github/workflows/find-cached-disks.yml' - '.github/workflows/build-docker-image.yml' jobs: diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index 01ff979aa51..bbce29d87f5 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -8,16 +8,18 @@ concurrency: cancel-in-progress: true on: + schedule: + # Run this job every Friday at mid-day UTC + # This is limited to the Zebra and lightwalletd Full Sync jobs + # TODO: we should move this behavior to a separate workflow + - cron: '0 12 * * 5' + workflow_dispatch: inputs: network: default: 'Mainnet' description: 'Network to deploy: Mainnet or Testnet' required: true - checkpoint_sync: - default: 'true' - description: 'Configures `zebrad` to use as many checkpoints as possible' - required: true regenerate-disks: type: boolean default: false @@ -33,6 +35,16 @@ on: default: false description: 'Just run a lightwalletd full sync and update tip disks' required: true + force_save_to_disk: + required: false + type: boolean + default: false + description: 'Force tests to always create a cached state disk, if they already create disks' + no_cache: + description: 'Disable the Docker cache for this build' + required: false + type: boolean + default: false pull_request: paths: @@ -73,12 +85,16 @@ on: - '**/clippy.toml' # workflow definitions - 'docker/**' + - '.dockerignore' - '.github/workflows/continous-integration-docker.yml' - '.github/workflows/deploy-gcp-tests.yml' - - '.github/workflows/build-docker-image.yml' - '.github/workflows/find-cached-disks.yml' + - '.github/workflows/build-docker-image.yml' jobs: + # to also run a job on Mergify head branches, + # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: + # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1 # Check if the cached state disks used by the tests are available for the default network. # @@ -114,16 +130,16 @@ jobs: dockerfile_path: ./docker/Dockerfile dockerfile_target: tests image_name: ${{ vars.CI_IMAGE_NAME }} - network: ${{ inputs.network || vars.ZCASH_NETWORK }} - checkpoint_sync: true + no_cache: ${{ inputs.no_cache || false }} rust_backtrace: full rust_lib_backtrace: full - colorbt_show_hidden: '1' - zebra_skip_ipv6_tests: '1' rust_log: info # zebrad tests without cached state + # TODO: make the non-cached-state tests use: + # network: ${{ inputs.network || vars.ZCASH_NETWORK }} + # Run all the zebra tests, including tests that are ignored by default. # Skips tests that need a cached state disk or a lightwalletd binary. 
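One detail in the test jobs that follow: `docker run -e NETWORK` with no `=value` does not set anything by itself, it forwards `NETWORK` from the runner's environment into the container, which is why each step also sets `NETWORK` in its `env:` block. A small sketch of that forwarding, using the public `alpine` image as a stand-in for the CI test image:

```bash
#!/usr/bin/env bash
# `-e NAME` with no value copies NAME from the host environment, which is how
# each test step's `env:` block reaches the container. The `alpine` image is
# just a stand-in for the CI image used in the jobs below.
export NETWORK="Testnet"

# Prints "Testnet": the value is inherited from the host shell.
docker run --rm -e NETWORK alpine sh -c 'echo "$NETWORK"'

# Prints an empty line: an explicit (empty) value always overrides inheritance.
docker run --rm -e NETWORK= alpine sh -c 'echo "$NETWORK"'
```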
 #
@@ -137,7 +153,7 @@ jobs:
     needs: build
     if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
     steps:
-      - uses: r7kamura/rust-problem-matchers@v1.3.0
+      - uses: r7kamura/rust-problem-matchers@v1.4.0

       - name: Inject slug/short variables
         uses: rlespinasse/github-slug-action@v4
@@ -150,7 +166,9 @@ jobs:
       - name: Run zebrad tests
         run: |
           docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }}
-          docker run --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests" --workspace -- --include-ignored
+          docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests" --workspace -- --include-ignored
+        env:
+          NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }}

   # zebrad tests without cached state with `getblocktemplate-rpcs` feature
   #
@@ -161,7 +179,7 @@ jobs:
     needs: build
     if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
     steps:
-      - uses: r7kamura/rust-problem-matchers@v1.3.0
+      - uses: r7kamura/rust-problem-matchers@v1.4.0

       - name: Inject slug/short variables
         uses: rlespinasse/github-slug-action@v4
@@ -171,7 +189,9 @@ jobs:
       - name: Run zebrad tests
         run: |
           docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }}
-          docker run --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests getblocktemplate-rpcs" --workspace -- --include-ignored
+          docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests getblocktemplate-rpcs" --workspace -- --include-ignored
+        env:
+          NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }}

   # Run state tests with fake activation heights.
   #
@@ -188,7 +208,7 @@ jobs:
     needs: build
     if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
     steps:
-      - uses: r7kamura/rust-problem-matchers@v1.3.0
+      - uses: r7kamura/rust-problem-matchers@v1.4.0

       - name: Inject slug/short variables
         uses: rlespinasse/github-slug-action@v4
@@ -198,9 +218,10 @@ jobs:
       - name: Run tests with fake activation heights
         run: |
           docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }}
-          docker run -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights
+          docker run -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights
         env:
           TEST_FAKE_ACTIVATION_HEIGHTS: '1'
+          NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }}

   # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state.
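In the `cargo test` invocations above, the standalone `--` separates cargo's own options from the flags handed to the compiled test binary (libtest), and the final bare word is a name filter, not a flag. A short reference sketch of the same invocation outside Docker:

```bash
#!/usr/bin/env bash
# Sketch of the libtest argument handling used by the test jobs above.
# Everything after the standalone `--` goes to the test binary, not cargo:
#   --nocapture        print test output as it runs instead of capturing it
#   --include-ignored  run #[ignore] tests as well as normal ones
#   <substring>        run only tests whose full names contain the substring
cargo test --locked --release --package zebra-state --lib -- \
    --nocapture --include-ignored with_fake_activation_heights
```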
# @@ -211,7 +232,7 @@ jobs: needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -221,7 +242,9 @@ jobs: - name: Run zebrad large sync tests run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ + docker run -e NETWORK --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} # Test launching lightwalletd with an empty lightwalletd and Zebra state. # @@ -232,7 +255,7 @@ jobs: needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -242,19 +265,20 @@ jobs: - name: Run tests with empty lightwalletd launch run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration + docker run -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration env: ZEBRA_TEST_LIGHTWALLETD: '1' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} # Test that Zebra works using the default config with the latest Zebra version test-configuration-file: name: Test Zebra default Docker config file - timeout-minutes: 5 + timeout-minutes: 15 runs-on: ubuntu-latest needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -265,21 +289,23 @@ jobs: run: | set -ex docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start + docker run -e NETWORK --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start EXIT_STATUS=$(docker logs --tail all --follow default-conf-tests 2>&1 | grep -q 
--extended-regexp --max-count=1 -e 'estimated progress to chain tip.*BeforeOverwinter'; echo $?; ) docker stop default-conf-tests docker logs default-conf-tests exit "$EXIT_STATUS" + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} # Test that Zebra works using the $ZEBRA_CONF_PATH config test-zebra-conf-path: name: Test Zebra custom Docker config file - timeout-minutes: 5 + timeout-minutes: 15 runs-on: ubuntu-latest needs: build if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} steps: - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -290,13 +316,17 @@ jobs: run: | set -ex docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} -c $ZEBRA_CONF_PATH start + docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} -c $ZEBRA_CONF_PATH start EXIT_STATUS=$(docker logs --tail all --follow variable-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'v1.0.0-rc.2.toml'; echo $?; ) docker stop variable-conf-tests docker logs variable-conf-tests exit "$EXIT_STATUS" env: ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # END TODO: make the non-cached-state tests use: + # network: ${{ inputs.network || vars.ZCASH_NETWORK }} # zebrad cached checkpoint state tests @@ -316,9 +346,10 @@ jobs: app_name: zebrad test_id: sync-to-checkpoint test_description: Test sync up to mandatory checkpoint - test_variables: '-e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1' + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1' needs_zebra_state: false saves_to_disk: true + force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: checkpoint height_grep_text: 'flushing database to disk .*height.*=.*Height.*\(' secrets: inherit @@ -344,7 +375,7 @@ jobs: app_name: zebrad test_id: sync-past-checkpoint test_description: Test full validation sync from a cached state - test_variables: '-e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1' + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1' needs_zebra_state: true saves_to_disk: false disk_suffix: checkpoint @@ -358,7 +389,7 @@ jobs: # This test always runs on mainnet. 
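A note on the `grep -q ...; echo $?` idiom in the two Docker config-file tests above: `grep -q --max-count=1` exits 0 as soon as the expected log line appears, without printing anything, and `echo $?` turns that exit status into the command substitution's output, which the job later reports with `exit "$EXIT_STATUS"`. A reduced sketch against a hypothetical log file instead of `docker logs --follow`:

```bash
#!/usr/bin/env bash
# Reduced sketch of the log-matching pattern above, reading a hypothetical
# zebrad.log file instead of following `docker logs`.
printf 'starting zebrad\nestimated progress to chain tip: BeforeOverwinter\n' > zebrad.log

# grep -q exits 0 on the first match and prints nothing; `echo $?` turns that
# exit status into data that survives the command substitution.
EXIT_STATUS=$(grep -q --extended-regexp --max-count=1 \
    -e 'estimated progress to chain tip.*BeforeOverwinter' zebrad.log; echo $?)

echo "match status: $EXIT_STATUS"   # 0 = matched, 1 = no match
exit "$EXIT_STATUS"
```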
 #
   # Runs:
-  # - after every PR is merged to `main`
+  # - on schedule, as defined at the top of the workflow
   # - on every PR update, but only if the state version in constants.rs has no cached disk
   # - in manual workflow runs, when run-full-sync is 'true' and network is 'Mainnet'
   #
@@ -367,22 +398,19 @@ jobs:
     name: Zebra tip
     needs: [ build, get-available-disks ]
     uses: ./.github/workflows/deploy-gcp-tests.yml
-    # to also run on Mergify head branches,
-    # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`:
-    # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1
-    if: ${{ (github.event_name == 'push' && github.ref_name == 'main') || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && github.event.inputs.network == 'Mainnet') }}
+    if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet') }}
     with:
       app_name: zebrad
       test_id: full-sync-to-tip
       test_description: Test a full sync up to the tip
       # The value of FULL_SYNC_MAINNET_TIMEOUT_MINUTES is currently ignored.
       # TODO: update the test to use {{ inputs.network }} instead?
-      test_variables: '-e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1'
-      network: 'Mainnet'
+      test_variables: '-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1'
       # This test runs for longer than 6 hours, so it needs multiple jobs
       is_long_test: true
       needs_zebra_state: false
       saves_to_disk: true
+      force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
       disk_suffix: tip
       height_grep_text: 'current_height.*=.*Height.*\('
     secrets: inherit
@@ -418,10 +446,11 @@ jobs:
       app_name: zebrad
       test_id: update-to-tip
       test_description: Test syncing to tip with a Zebra tip state
-      test_variables: '-e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
+      test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
       needs_zebra_state: true
       # update the disk on every PR, to increase CI speed
       saves_to_disk: true
+      force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
       disk_suffix: tip
       root_state_path: '/var/cache'
       zebra_state_dir: 'zebrad-cache'
@@ -451,8 +480,7 @@ jobs:
       test_id: generate-checkpoints-mainnet
       test_description: Generate Zebra checkpoints on mainnet
       # TODO: update the test to use {{ inputs.network }} instead?
-      test_variables: '-e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
-      network: 'Mainnet'
+      test_variables: '-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
       needs_zebra_state: true
       # test-update-sync updates the disk on every PR, so we don't need to do it here
       saves_to_disk: false
@@ -472,7 +500,7 @@

   # This job always runs on testnet, regardless of any inputs or variable settings.
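The `test_variables` strings above are consumed by the reusable deploy-gcp-tests workflow, which ultimately splices them into a `docker run` command line; the `-e NAME=value` pairs work because the string is expanded unquoted and word-split into separate flags. A sketch of that splicing, with a plain `docker run` standing in for the reusable workflow's launch step:

```bash
#!/usr/bin/env bash
# Sketch of how a `test_variables` string can be spliced into `docker run`.
# The plain `docker run` here is a simplified stand-in for the reusable
# deploy-gcp-tests workflow, which receives the string as an input.
TEST_VARIABLES='-e NETWORK=Mainnet -e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1'

# Deliberately unquoted: word-splitting turns the string into separate `-e`
# flags. (Quoting it would pass the whole string as a single argument.)
# shellcheck disable=SC2086
docker run --rm $TEST_VARIABLES alpine env | grep -E 'NETWORK|TEST_|ZEBRA_'
```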
# # Runs: - # - after every PR is merged to `main` + # - on schedule, as defined at the top of the workflow # - on every PR update, but only if the state version in constants.rs has no cached disk # - in manual workflow runs, when run-full-sync is 'true' and network is 'Testnet' # @@ -481,22 +509,20 @@ jobs: name: Zebra tip on testnet needs: [ build, get-available-disks-testnet ] uses: ./.github/workflows/deploy-gcp-tests.yml - # to also run on Mergify head branches, - # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: - # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1 - if: ${{ (github.event_name == 'push' && github.ref_name == 'main') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && github.event.inputs.network == 'Testnet') }} + if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Testnet') }} with: app_name: zebrad test_id: full-sync-to-tip-testnet test_description: Test a full sync up to the tip on testnet # The value of FULL_SYNC_TESTNET_TIMEOUT_MINUTES is currently ignored. - test_variables: '-e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1' - network: 'Testnet' + test_variables: '-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1' + network: "Testnet" # A full testnet sync could take 2-10 hours in April 2023. # The time varies a lot due to the small number of nodes. is_long_test: true needs_zebra_state: false saves_to_disk: true + force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: tip height_grep_text: 'current_height.*=.*Height.*\(' secrets: inherit @@ -534,12 +560,13 @@ jobs: app_name: zebrad test_id: generate-checkpoints-testnet test_description: Generate Zebra checkpoints on testnet - test_variables: '-e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' - network: 'Testnet' + test_variables: '-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + network: "Testnet" needs_zebra_state: true # update the disk on every PR, to increase CI speed # we don't have a test-update-sync-testnet job, so we need to update the disk here saves_to_disk: true + force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: tip root_state_path: '/var/cache' zebra_state_dir: 'zebrad-cache' @@ -551,7 +578,9 @@ jobs: # Test full sync of lightwalletd with a Zebra tip state # # Runs: - # - after every PR is merged to `main` + # - on schedule, as defined at the top of the workflow + # - on every PR update, but only if the state version in constants.rs has no cached disk + # - in manual workflow runs, when run-lwd-sync is 'true' and network is 'Mainnet' (the network is required by the test-full-sync job) # # If the state version has changed, waits for the new cached state to be created. # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
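The `height_grep_text` inputs above plug into the "Get sync height from logs" step shown later in this diff: the regex is concatenated with `[0-9]+`, and a second `grep --only-matching` pass strips the match down to the bare number. A standalone sketch of that extraction, using a hypothetical log line:

```bash
#!/usr/bin/env bash
# Sketch of how a `height_grep_text` input is combined with `[0-9]+` to
# extract the reached sync height, on a hypothetical zebrad log line.
HEIGHT_GREP_TEXT='current_height.*=.*Height.*\('
DOCKER_LOGS='2023-07-01 INFO sync: current_height = Height(2123456) ...'

# First grep isolates the labelled phrase up to the height digits;
# second grep strips it down to the digits; tail keeps the last match.
SYNC_HEIGHT=$( \
    echo "$DOCKER_LOGS" | \
    grep --extended-regexp --only-matching "${HEIGHT_GREP_TEXT}[0-9]+" | \
    grep --extended-regexp --only-matching '[0-9]+' | \
    tail -1 \
)
echo "$SYNC_HEIGHT"   # prints: 2123456
```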
@@ -559,20 +588,19 @@ jobs: name: lightwalletd tip needs: [ test-full-sync, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - # to also run on Mergify head branches, - # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: - # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1 - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && ((github.event_name == 'push' && github.ref_name == 'main') || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} + # Currently the lightwalletd tests only work on Mainnet + if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} with: app_name: lightwalletd test_id: lwd-full-sync test_description: Test lightwalletd full sync - test_variables: '-e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' - # This test runs for (just) longer than 6 hours, so it needs multiple jobs + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: true needs_lwd_state: false saves_to_disk: true + force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_prefix: lwd-cache disk_suffix: tip root_state_path: '/var/cache' @@ -600,12 +628,12 @@ jobs: name: lightwalletd tip update needs: [ lightwalletd-full-sync, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-update-sync test_description: Test lightwalletd update sync with both states - test_variables: '-e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' needs_zebra_state: true needs_lwd_state: true # since we do a full sync in every PR, the 
new cached state will only be a few minutes newer than the original one @@ -632,12 +660,12 @@ jobs: name: Zebra tip JSON-RPC needs: [ test-full-sync, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: fully-synced-rpc test_description: Test lightwalletd RPC with a Zebra tip state - test_variables: '-e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' needs_zebra_state: true saves_to_disk: false disk_suffix: tip @@ -657,12 +685,12 @@ jobs: name: lightwalletd tip send needs: [ lightwalletd-full-sync, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-send-transactions test_description: Test sending transactions via lightwalletd - test_variables: '-e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' needs_zebra_state: true needs_lwd_state: true saves_to_disk: false @@ -684,12 +712,12 @@ jobs: name: lightwalletd GRPC tests needs: [ lightwalletd-full-sync, get-available-disks ] uses: ./.github/workflows/deploy-gcp-tests.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && 
github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-grpc-wallet test_description: Test gRPC calls via lightwalletd - test_variables: '-e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' needs_zebra_state: true needs_lwd_state: true saves_to_disk: false @@ -720,7 +748,7 @@ jobs: app_name: zebrad test_id: get-block-template test_description: Test getblocktemplate RPC method via Zebra's rpc server - test_variables: '-e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' needs_zebra_state: true needs_lwd_state: false saves_to_disk: false @@ -746,7 +774,7 @@ jobs: app_name: zebrad test_id: submit-block test_description: Test submitting blocks via Zebra's rpc server - test_variables: '-e TEST_SUBMIT_BLOCK=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SUBMIT_BLOCK=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' needs_zebra_state: true needs_lwd_state: false saves_to_disk: false diff --git a/.github/workflows/continous-integration-os.patch.yml b/.github/workflows/continous-integration-os.patch.yml index 8619ae3c541..fe81951024a 100644 --- a/.github/workflows/continous-integration-os.patch.yml +++ b/.github/workflows/continous-integration-os.patch.yml @@ -22,7 +22,8 @@ jobs: strategy: matrix: # TODO: Windows was removed for now, see https://github.com/ZcashFoundation/zebra/issues/3801 - os: [ubuntu-latest, macos-latest] + # TODO: macOS tests were removed for now, see https://github.com/ZcashFoundation/zebra/issues/6824 + os: [ubuntu-latest] rust: [stable, beta] features: ["", " --features getblocktemplate-rpcs"] exclude: @@ -30,6 +31,9 @@ jobs: rust: beta - os: macos-latest features: " --features getblocktemplate-rpcs" + - os: ubuntu-latest + rust: beta + features: " --features getblocktemplate-rpcs" steps: - run: 'echo "No build required"' @@ -56,7 +60,7 @@ jobs: checks: - bans - sources - features: ['', '--all-features', '--no-default-features'] + features: ['', '--features default-release-binaries', '--all-features'] steps: - run: 'echo "No build required"' diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 9633100541d..7242fddfb48 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -69,7 +69,8 @@ jobs: fail-fast: false matrix: # TODO: Windows was removed for now, see https://github.com/ZcashFoundation/zebra/issues/3801 - os: [ubuntu-latest, macos-latest] + # TODO: macOS tests were removed for now, see https://github.com/ZcashFoundation/zebra/issues/6824 + os: [ubuntu-latest] rust: [stable, beta] features: ["", " --features getblocktemplate-rpcs"] exclude: @@ -81,18 +82,23 @@ jobs: rust: beta - os: macos-latest features: " --features getblocktemplate-rpcs" 
+ # getblocktemplate-rpcs is an experimental feature, so we just need to test it on stable Rust + # beta is unlikely to fail just for this feature, and if it does, we can fix it when it reaches stable. + - os: ubuntu-latest + rust: beta + features: " --features getblocktemplate-rpcs" steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} # Setup Rust with ${{ matrix.rust }} toolchain and minimal profile @@ -101,7 +107,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=${{ matrix.rust }} --profile=minimal - - uses: Swatinem/rust-cache@v2.3.0 + - uses: Swatinem/rust-cache@v2.6.0 # TODO: change Rust cache target directory on Windows, # or remove this workaround once the build is more efficient (#3005). #with: @@ -197,10 +203,10 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 # Setup Rust with stable toolchain and minimal profile - name: Setup Rust @@ -219,16 +225,16 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} # Setup Rust with stable toolchain and minimal profile @@ -236,7 +242,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - - uses: Swatinem/rust-cache@v2.3.0 + - uses: Swatinem/rust-cache@v2.6.0 with: shared-key: "clippy-cargo-lock" @@ -252,25 +258,30 @@ jobs: checks: - bans - sources - features: ['', '--all-features', '--no-default-features'] - # We always want to run the --all-features job, because it gives accurate "skip tree root was not found" warnings + # We don't need to check `--no-default-features` here, because (except in very rare cases): + # - disabling features isn't going to add duplicate dependencies + # - disabling features isn't going to add more crate sources + features: ['', '--features default-release-binaries', '--all-features'] + # Always run the --all-features job, to get accurate "skip tree root was not found" warnings fail-fast: false # Prevent sudden announcement of a new advisory from failing ci: continue-on-error: ${{ matrix.checks == 'advisories' }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - # The --all-features job is the only job that gives accurate "skip tree root was not found" warnings. - # In other jobs, we expect some of these warnings, due to disabled features. 
- name: Check ${{ matrix.checks }} with features ${{ matrix.features }} uses: EmbarkStudios/cargo-deny-action@v1 with: - command: check ${{ matrix.checks }} + # --all-features spuriously activates openssl, but we want to ban that dependency in + # all of zebrad's production features for security reasons. But the --all-features job is + # the only job that gives accurate "skip tree root was not found" warnings. + # In other jobs, we expect some of these warnings, due to disabled features. + command: check ${{ matrix.checks }} ${{ matrix.features == '--all-features' && '--allow banned' || '--allow unmatched-skip-root' }} arguments: --workspace ${{ matrix.features }} unused-deps: @@ -279,10 +290,10 @@ jobs: steps: - name: Checkout git repository - uses: actions/checkout@v3.5.2 + uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 # Setup Rust with stable toolchain and minimal profile - name: Setup Rust @@ -290,7 +301,7 @@ jobs: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal - name: Install cargo-machete - uses: baptiste0928/cargo-install@v2.0.0 + uses: baptiste0928/cargo-install@v2.1.0 with: crate: cargo-machete @@ -302,7 +313,7 @@ jobs: cargo machete --skip-target-dir || true echo "-- unused dependencies are below this line, full output is above --" if (cargo machete --skip-target-dir 2>/dev/null || true) | \ - grep -v -e gumdrop -e humantime-serde -e tinyvec -e "found the following" -e Cargo.toml -e Done; then + grep -v -e gumdrop -e humantime-serde -e tinyvec -e zebra-utils -e "found the following" -e Cargo.toml -e Done; then echo "New unused dependencies were found, please remove them!" exit 1 else diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 0eba61e0868..5eecd3a6dd3 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false diff --git a/.github/workflows/delete-gcp-resources.yml b/.github/workflows/delete-gcp-resources.yml index f0b97ff1a64..b92e2f94d20 100644 --- a/.github/workflows/delete-gcp-resources.yml +++ b/.github/workflows/delete-gcp-resources.yml @@ -30,7 +30,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false @@ -239,7 +239,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false @@ -254,7 +254,7 @@ jobs: token_format: 'access_token' - name: Login to Google Artifact Registry - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v2.2.0 with: registry: us-docker.pkg.dev username: oauth2accesstoken diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index d2c7f37a027..ee4db139afd 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -79,7 +79,12 @@ on: saves_to_disk: required: true type: boolean - description: 'Does the test create a new cached state disk?' + description: 'Can this test create new or updated cached state disks?' 
+ force_save_to_disk: + required: false + type: boolean + default: false + description: 'Force this test to create a new or updated cached state disk' app_name: required: false type: string @@ -109,11 +114,11 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -204,11 +209,11 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -270,11 +275,11 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -448,11 +453,11 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -571,29 +576,24 @@ jobs: " - # follow the logs of the test we just launched, up to Sapling activation (or the test finishing) + # check the logs of the test we just launched for zebrad startup messages # - # If `inputs.is_long_test` is `false`, this job is skipped. - logs-sprout: - name: Log ${{ inputs.test_id }} test (sprout) + # this step makes sure `zebrad` is running, and configured for `inputs.network`. + logs-startup: + name: Check startup for ${{ inputs.test_id }} # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. needs: [ launch-with-cached-state, launch-without-cached-state ] # If the previous job fails, we still want to show the logs. - if: ${{ !cancelled() && inputs.is_long_test }} + if: ${{ !cancelled() }} runs-on: ubuntu-latest permissions: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' - # We can't use the standard Rust problem matchers on these jobs, - # because they produce a lot of output. - # - # TODO: create a custom matcher config for these specific jobs - #- uses: r7kamura/rust-problem-matchers@v1.3.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -623,13 +623,43 @@ jobs: - name: Set up Cloud SDK uses: google-github-actions/setup-gcloud@v1.1.1 + # Show all the logs since the container launched, + # following until we see zebrad startup messages. + # + # This check limits the number of log lines, so tests running on the wrong network don't + # run until the job timeout. If Zebra does a complete recompile, there are a few hundred log + # lines before the startup logs. So that's what we use here. + # + # The log pipeline ignores the exit status of `docker logs`. 
+ # It also ignores the expected 'broken pipe' error from `tee`, + # which happens when `grep` finds a matching output and moves on to the next job. + # + # Errors in the tests are caught by the final test status job. + - name: Check startup logs for ${{ inputs.test_id }} + run: | + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command \ + "\ + sudo docker logs \ + --tail all \ + --follow \ + ${{ inputs.test_id }} | \ + head -700 | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + -e 'Zcash network: ${{ inputs.network }}' \ + " + # follow the logs of the test we just launched, up to Canopy activation (or the test finishing) # # If `inputs.is_long_test` is `false`, this job is skipped. logs-heartwood: name: Log ${{ inputs.test_id }} test (heartwood) - # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. - needs: [ launch-with-cached-state, launch-without-cached-state ] + needs: [ logs-startup ] # If the previous job fails, we still want to show the logs. if: ${{ !cancelled() && inputs.is_long_test }} runs-on: ubuntu-latest @@ -637,7 +667,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -672,12 +702,6 @@ jobs: # Show all the logs since the container launched, # following until Canopy activation (or the test finishes) - # - # The log pipeline ignores the exit status of `docker logs`. - # It also ignores the expected 'broken pipe' error from `tee`, - # which happens when `grep` finds a matching output and moves on to the next job. - # - # Errors in the tests are caught by the final test status job. - name: Show logs for ${{ inputs.test_id }} test (heartwood) run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ @@ -709,7 +733,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -779,7 +803,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' @@ -855,16 +879,16 @@ jobs: # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. # Normally, if a job is skipped, all the jobs that depend on it are also skipped. # So we need to override the default success() check to make this job run. - if: ${{ !cancelled() && !failure() && inputs.saves_to_disk }} + if: ${{ !cancelled() && !failure() && (inputs.saves_to_disk || inputs.force_save_to_disk) }} permissions: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 @@ -944,6 +968,96 @@ jobs: echo "UPDATE_SUFFIX=$UPDATE_SUFFIX" >> "$GITHUB_ENV" echo "TIME_SUFFIX=$TIME_SUFFIX" >> "$GITHUB_ENV" + # Get the full initial and running database versions from the test logs. 
+      # These versions are used as part of the disk description and labels.
+      #
+      # If these versions are missing from the logs, the job fails.
+      #
+      # Typically, the database versions are around line 20 in the logs.
+      # But we check the first 1000 log lines, just in case the test harness recompiles all the
+      # dependencies before running the test. (This can happen if the cache is invalid.)
+      #
+      # Passes the versions to subsequent steps using the $INITIAL_DISK_DB_VERSION,
+      # $RUNNING_DB_VERSION, and $DB_VERSION_SUMMARY env variables.
+      - name: Get database versions from logs
+        run: |
+          INITIAL_DISK_DB_VERSION=""
+          RUNNING_DB_VERSION=""
+          DB_VERSION_SUMMARY=""
+
+          DOCKER_LOGS=$( \
+          gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
+          --zone ${{ vars.GCP_ZONE }} \
+          --ssh-flag="-o ServerAliveInterval=5" \
+          --ssh-flag="-o ConnectionAttempts=20" \
+          --ssh-flag="-o ConnectTimeout=5" \
+          --command=" \
+          sudo docker logs ${{ inputs.test_id }} | head -1000 \
+          ")
+
+          # either a semantic version or "creating new database"
+          INITIAL_DISK_DB_VERSION=$( \
+          echo "$DOCKER_LOGS" | \
+          grep --extended-regexp --only-matching 'initial disk state version: [0-9a-z\.]+' | \
+          grep --extended-regexp --only-matching '[0-9a-z\.]+' | \
+          tail -1 || \
+          [[ $? == 1 ]] \
+          )
+
+          if [[ -z "$INITIAL_DISK_DB_VERSION" ]]; then
+              echo "Checked logs:"
+              echo ""
+              echo "$DOCKER_LOGS"
+              echo ""
+              echo "Missing initial disk database version in logs: $INITIAL_DISK_DB_VERSION"
+              # Fail the tests, because Zebra didn't log the initial disk database version,
+              # or the regex in this step is wrong.
+              false
+          fi
+
+          if [[ "$INITIAL_DISK_DB_VERSION" = "creating.new.database" ]]; then
+              INITIAL_DISK_DB_VERSION="new"
+          else
+              INITIAL_DISK_DB_VERSION="v${INITIAL_DISK_DB_VERSION//./-}"
+          fi
+
+          echo "Found initial disk database version in logs: $INITIAL_DISK_DB_VERSION"
+          echo "INITIAL_DISK_DB_VERSION=$INITIAL_DISK_DB_VERSION" >> "$GITHUB_ENV"
+
+          RUNNING_DB_VERSION=$( \
+          echo "$DOCKER_LOGS" | \
+          grep --extended-regexp --only-matching 'running state version: [0-9\.]+' | \
+          grep --extended-regexp --only-matching '[0-9\.]+' | \
+          tail -1 || \
+          [[ $? == 1 ]] \
+          )
+
+          if [[ -z "$RUNNING_DB_VERSION" ]]; then
+              echo "Checked logs:"
+              echo ""
+              echo "$DOCKER_LOGS"
+              echo ""
+              echo "Missing running database version in logs: $RUNNING_DB_VERSION"
+              # Fail the tests, because Zebra didn't log the running database version,
+              # or the regex in this step is wrong.
+              false
+          fi
+
+          RUNNING_DB_VERSION="v${RUNNING_DB_VERSION//./-}"
+          echo "Found running database version in logs: $RUNNING_DB_VERSION"
+          echo "RUNNING_DB_VERSION=$RUNNING_DB_VERSION" >> "$GITHUB_ENV"
+
+          if [[ "$INITIAL_DISK_DB_VERSION" = "$RUNNING_DB_VERSION" ]]; then
+              DB_VERSION_SUMMARY="$RUNNING_DB_VERSION"
+          elif [[ "$INITIAL_DISK_DB_VERSION" = "new" ]]; then
+              DB_VERSION_SUMMARY="$RUNNING_DB_VERSION in new database"
+          else
+              DB_VERSION_SUMMARY="$INITIAL_DISK_DB_VERSION changing to $RUNNING_DB_VERSION"
+          fi
+
+          echo "Summarised database versions from logs: $DB_VERSION_SUMMARY"
+          echo "DB_VERSION_SUMMARY=$DB_VERSION_SUMMARY" >> "$GITHUB_ENV"
+
       # Get the sync height from the test logs, which is later used as part of the
       # disk description and labels.
       #
@@ -953,7 +1067,7 @@ jobs:
       #
       # If the sync height is missing from the logs, the job fails.
       #
-      # Passes the sync height to subsequent steps using $SYNC_HEIGHT env variable.
+      # Passes the sync height to subsequent steps using the $SYNC_HEIGHT env variable.
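Two idioms in the version-extraction script above are worth spelling out, and the sync-height step right after this note applies the same pattern: the double `grep --only-matching` first isolates the labelled phrase and then strips it to the bare value, and the trailing `|| [[ $? == 1 ]]` converts grep's no-match status (exit 1) back into success when the shell runs with `pipefail`, while still letting real errors (exit 2) fail the step. A standalone sketch with a hypothetical log line:

```bash
#!/usr/bin/env bash
# Sketch of the two-stage `grep --only-matching` extraction above, on a
# hypothetical log line. With `pipefail`, a log without a version line would
# make the pipeline exit 1 (grep's no-match status); the trailing
# `|| [[ $? == 1 ]]` converts exactly that status back into success, so the
# variable is simply left empty for the caller to check.
set -eo pipefail

DOCKER_LOGS='2023-07-01 INFO zebrad: running state version: 25.1.2 ...'

RUNNING_DB_VERSION=$( \
    echo "$DOCKER_LOGS" | \
    grep --extended-regexp --only-matching 'running state version: [0-9\.]+' | \
    grep --extended-regexp --only-matching '[0-9\.]+' | \
    tail -1 || \
    [[ $? == 1 ]] \
)

echo "${RUNNING_DB_VERSION:-<missing>}"   # prints: 25.1.2
```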
- name: Get sync height from logs run: | SYNC_HEIGHT="" @@ -971,12 +1085,16 @@ jobs: SYNC_HEIGHT=$( \ echo "$DOCKER_LOGS" | \ grep --extended-regexp --only-matching '${{ inputs.height_grep_text }}[0-9]+' | \ - grep --extended-regexp --only-matching '[0-9]+' | \ + grep --extended-regexp --only-matching '[0-9]+' | \ tail -1 || \ [[ $? == 1 ]] \ ) if [[ -z "$SYNC_HEIGHT" ]]; then + echo "Checked logs:" + echo "" + echo "$DOCKER_LOGS" + echo "" echo "Missing sync height in logs: $SYNC_HEIGHT" # Fail the tests, because Zebra and lightwalletd didn't log their sync heights, # or the CI workflow sync height regex is wrong. @@ -1038,15 +1156,15 @@ jobs: - name: Create image from state disk run: | MINIMUM_UPDATE_HEIGHT=$((ORIGINAL_HEIGHT+CACHED_STATE_UPDATE_LIMIT)) - if [[ -z "$UPDATE_SUFFIX" ]] || [[ "$SYNC_HEIGHT" -gt "$MINIMUM_UPDATE_HEIGHT" ]]; then + if [[ -z "$UPDATE_SUFFIX" ]] || [[ "$SYNC_HEIGHT" -gt "$MINIMUM_UPDATE_HEIGHT" ]] || [[ "${{ inputs.force_save_to_disk }}" == "true" ]]; then gcloud compute images create \ "${{ inputs.disk_prefix }}-${SHORT_GITHUB_REF}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${NETWORK}-${{ inputs.disk_suffix }}${UPDATE_SUFFIX}-${TIME_SUFFIX}" \ --force \ --source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ --source-disk-zone=${{ vars.GCP_ZONE }} \ --storage-location=us \ - --description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }}" \ - --labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${{ env.STATE_VERSION }},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},updated-from-height=${ORIGINAL_HEIGHT},test-id=${{ inputs.test_id }},app-name=${{ inputs.app_name }}" + --description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }} and database format ${{ env.DB_VERSION_SUMMARY }}" \ + --labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${{ env.STATE_VERSION }},state-running-version=${RUNNING_DB_VERSION},initial-state-disk-version=${INITIAL_DISK_DB_VERSION},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},force-save=${{ inputs.force_save_to_disk }},updated-from-height=${ORIGINAL_HEIGHT},test-id=${{ inputs.test_id }},app-name=${{ inputs.app_name }}" else echo "Skipped cached state update because the new sync height $SYNC_HEIGHT was less than $CACHED_STATE_UPDATE_LIMIT blocks above the original height $ORIGINAL_HEIGHT" fi @@ -1064,11 +1182,11 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 diff --git a/.github/workflows/dockerhub-description.yml b/.github/workflows/dockerhub-description.yml index fc0a58debb7..55a02bb920f 100644 --- a/.github/workflows/dockerhub-description.yml +++ b/.github/workflows/dockerhub-description.yml @@ -17,12 +17,12 @@ jobs: dockerHubDescription: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - name: Docker Hub Description - uses: peter-evans/dockerhub-description@v3.4.1 + uses: peter-evans/dockerhub-description@v3.4.2 with: username: ${{ 
secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index c2e84be3cbb..9c7ee5aa519 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -4,8 +4,8 @@ name: Docs # already in process, won't get cancelled. Instead, we let the first to complete # then queue the latest pending workflow, cancelling any workflows in between concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: false + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true on: workflow_dispatch: @@ -27,85 +27,196 @@ on: # workflow definitions - '.github/workflows/docs.yml' + pull_request: + branches: + - main + paths: + # doc source files + - 'book/**' + - '**/firebase.json' + - 'katex-header.html' + # rustdoc source files + - '**/*.rs' + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - '.github/workflows/docs.yml' + env: RUST_LOG: ${{ vars.RUST_LOG }} RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }} COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} + FIREBASE_CHANNEL: ${{ github.event_name == 'pull_request' && 'preview' || 'live' }} jobs: - build: - name: Build and Deploy Docs (+beta) - timeout-minutes: 45 + build-docs-book: + name: Build and Deploy Zebra Book Docs + timeout-minutes: 5 runs-on: ubuntu-latest + environment: docs + permissions: + checks: write + contents: 'read' + id-token: 'write' + pull-requests: write steps: - name: Checkout the source code - uses: actions/checkout@v3.5.2 + uses: actions/checkout@v3.5.3 with: persist-credentials: false - - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + - name: Setup mdBook + uses: jontze/action-mdbook@v2.2.1 with: - # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' - repo-token: ${{ secrets.GITHUB_TOKEN }} + token: ${{ secrets.GITHUB_TOKEN }} + mdbook-version: '~0.4' + use-linkcheck: true + use-mermaid: true - # Setup Rust with beta toolchain and default profile (to include rust-docs) - - name: Setup Rust + - name: Build Zebra book run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default + mdbook build book --dest-dir "$(pwd)"/target/book - - uses: Swatinem/rust-cache@v2.3.0 + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_FIREBASE_SA }}' - - name: Setup mdBook - uses: peaceiris/actions-mdbook@v1.2.0 + # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed + - run: | + # shellcheck disable=SC2002 + echo "GCP_FIREBASE_SA=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" + + - name: Deploy Zebra book to firebase + uses: FirebaseExtended/action-hosting-deploy@v0.7.1 with: - mdbook-version: '0.4.18' + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA }} + channelId: ${{ env.FIREBASE_CHANNEL }} + projectId: ${{ vars.GCP_PROJECT }} + target: docs-book - # TODO: actions-mdbook does not yet have an option to install mdbook-mermaid https://github.com/peaceiris/actions-mdbook/issues/426 - - name: Install 
mdbook - run: | - cargo install mdbook-mermaid + build-docs-external: + name: Build and Deploy Zebra External Docs + timeout-minutes: 45 + runs-on: ubuntu-latest + environment: docs + permissions: + checks: write + contents: 'read' + id-token: 'write' + pull-requests: write + steps: + - name: Checkout the source code + uses: actions/checkout@v3.5.3 + with: + persist-credentials: false - - name: Build Zebra book + - name: Install last version of Protoc + uses: arduino/setup-protoc@v2.0.0 + with: + version: '23.x' + repo-token: ${{ secrets.GITHUB_TOKEN }} + + # Setup Rust with beta toolchain and default profile (to include rust-docs) + - name: Setup Rust run: | - mdbook build book/ + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default - - name: Deploy Zebra book to firebase - uses: w9jds/firebase-action@v11.30.1 - with: - args: deploy - env: - FIREBASE_TOKEN: ${{ secrets.FIREBASE_TOKEN }} - PROJECT_PATH: book/ - PROJECT_ID: zebra-book-b535f + - uses: Swatinem/rust-cache@v2.6.0 - name: Build external docs run: | # Exclude zebra-utils, it is not for library or app users - cargo doc --no-deps --workspace --all-features --exclude zebra-utils + cargo doc --no-deps --workspace --all-features --exclude zebra-utils --target-dir target/external env: RUSTDOCFLAGS: '--html-in-header katex-header.html' + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_FIREBASE_SA }}' + + # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed + - run: | + # shellcheck disable=SC2002 + echo "GCP_FIREBASE_SA=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" + - name: Deploy external docs to firebase - uses: w9jds/firebase-action@v11.30.1 + uses: FirebaseExtended/action-hosting-deploy@v0.7.1 with: - args: deploy - env: - FIREBASE_TOKEN: ${{ secrets.FIREBASE_TOKEN }} - PROJECT_ID: zebra-doc-external + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA }} + channelId: ${{ env.FIREBASE_CHANNEL }} + target: docs-external + projectId: ${{ vars.GCP_PROJECT }} + + build-docs-internal: + name: Build and Deploy Zebra Internal Docs + timeout-minutes: 45 + runs-on: ubuntu-latest + environment: docs + permissions: + checks: write + contents: 'read' + id-token: 'write' + pull-requests: write + steps: + - name: Checkout the source code + uses: actions/checkout@v3.5.3 + with: + persist-credentials: false + + - name: Install last version of Protoc + uses: arduino/setup-protoc@v2.0.0 + with: + version: '23.x' + repo-token: ${{ secrets.GITHUB_TOKEN }} + + # Setup Rust with beta toolchain and default profile (to include rust-docs) + - name: Setup Rust + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default + + - uses: Swatinem/rust-cache@v2.6.0 - name: Build internal docs run: | - cargo doc --no-deps --workspace --all-features --document-private-items + cargo doc --no-deps --workspace --all-features --document-private-items --target-dir target/internal env: RUSTDOCFLAGS: '--html-in-header katex-header.html' + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: 
'${{ vars.GCP_FIREBASE_SA }}' + + # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed + - run: | + # shellcheck disable=SC2002 + echo "GCP_FIREBASE_SA=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV" + - name: Deploy internal docs to firebase - uses: w9jds/firebase-action@v11.30.1 + uses: FirebaseExtended/action-hosting-deploy@v0.7.1 with: - args: deploy - env: - FIREBASE_TOKEN: ${{ secrets.FIREBASE_TOKEN }} - PROJECT_ID: zebra-doc-internal-e9fd4 + repoToken: ${{ secrets.GITHUB_TOKEN }} + firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA }} + channelId: ${{ env.FIREBASE_CHANNEL }} + target: docs-internal + projectId: ${{ vars.GCP_PROJECT }} diff --git a/.github/workflows/find-cached-disks.yml b/.github/workflows/find-cached-disks.yml index d92d176726e..7c07259ac82 100644 --- a/.github/workflows/find-cached-disks.yml +++ b/.github/workflows/find-cached-disks.yml @@ -30,7 +30,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: 0 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 8ad8e2a2f20..b1133910a09 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -30,14 +30,14 @@ jobs: rust: ${{ steps.changed-files-rust.outputs.any_changed == 'true' }} workflows: ${{ steps.changed-files-workflows.outputs.any_changed == 'true' }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false fetch-depth: 0 - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v35.9.2 + uses: tj-actions/changed-files@v37.5.1 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v35.9.2 + uses: tj-actions/changed-files@v37.5.1 with: files: | .github/workflows/*.yml @@ -62,15 +62,15 @@ jobs: if: ${{ needs.changed-files.outputs.rust == 'true' }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Check workflow permissions @@ -86,7 +86,7 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=default - - uses: Swatinem/rust-cache@v2.3.0 + - uses: Swatinem/rust-cache@v2.6.0 with: shared-key: "clippy-cargo-lock" @@ -112,16 +112,16 @@ jobs: if: ${{ needs.changed-files.outputs.rust == 'true' }} steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} # Setup Rust with stable toolchain and default profile @@ -131,7 +131,7 @@ jobs: # We don't cache `fmt` outputs because the job is quick, # and we want to use the limited GitHub actions cache space for slower jobs. 
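An aside on the `GCP_FIREBASE_SA` workaround repeated in all three docs jobs above: `$GITHUB_ENV` only accepts one `KEY=value` pair per line, so the multi-line JSON key file written by `google-github-actions/auth` has to be collapsed onto a single line before it can be exported. A minimal sketch of that step, using a hypothetical credentials path in place of `steps.auth.outputs.credentials_file_path`:

```sh
# Sketch of the GITHUB_ENV flattening workaround used in the docs jobs above.
# CREDS_FILE is a hypothetical stand-in for the auth step's output path.
CREDS_FILE="/tmp/gcp-credentials.json"

# Strip newlines so the whole JSON document fits on one KEY=value line,
# then append it to the file GitHub Actions reads back into the job env.
echo "GCP_FIREBASE_SA=$(tr -d '\n' < "$CREDS_FILE")" >> "$GITHUB_ENV"
```

Redirecting straight into `tr`, rather than piping from `cat`, would also make the `# shellcheck disable=SC2002` comments above unnecessary.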
- #- uses: Swatinem/rust-cache@v2.3.0 + #- uses: Swatinem/rust-cache@v2.6.0 - run: | cargo fmt --all -- --check @@ -145,22 +145,22 @@ jobs: # cargo doc doesn't support '-- -D warnings', so we have to add it here # https://github.com/rust-lang/cargo/issues/8424#issuecomment-774662296 # - # These -A and -W settings must be the same as the rustdoc settings in: - # https://github.com/ZcashFoundation/zebra/blob/main/.cargo/config.toml#L53 + # The -A and -W settings must be the same as the `rustdocflags` in: + # https://github.com/ZcashFoundation/zebra/blob/main/.cargo/config.toml#L87 env: RUSTDOCFLAGS: -D warnings -A rustdoc::private_intra_doc_links steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false - - uses: r7kamura/rust-problem-matchers@v1.3.0 + - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Install last version of Protoc - uses: arduino/setup-protoc@v1.2.0 + uses: arduino/setup-protoc@v2.0.0 with: # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed - version: '3.20.1' + version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} # Setup Rust with stable toolchain and default profile @@ -177,17 +177,20 @@ jobs: needs: changed-files if: ${{ needs.changed-files.outputs.workflows == 'true' }} steps: - - uses: actions/checkout@v3.5.2 - - uses: reviewdog/action-actionlint@v1.37.0 + - uses: actions/checkout@v3.5.3 + - name: actionlint + uses: reviewdog/action-actionlint@v1.37.1 with: level: warning fail_on_error: false + - name: validate-dependabot + uses: marocchino/validate-dependabot@v2.1.0 codespell: runs-on: ubuntu-latest needs: changed-files steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 - uses: plettich/action-codespell@master with: github_token: ${{ secrets.github_token }} diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index d307fd1ee51..a96c15c2867 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -18,39 +18,28 @@ jobs: # Each time this workflow is executed, a build will be triggered to create a new image # with the corresponding tags using information from git - # The image will be named `zebrad:` + # The image will be named `zebra:` build: name: Build Release Docker uses: ./.github/workflows/build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime - image_name: zebrad - network: Mainnet - checkpoint_sync: true - rust_backtrace: '1' - zebra_skip_ipv6_tests: '1' + image_name: zebra rust_log: info # This step needs access to Docker Hub secrets to run successfully secrets: inherit - # The image will be named `zebrad-mining-rpcs-testnet:.experimental` + # The image will be named `zebra:.experimental` build-mining-testnet: name: Build Release Testnet Mining Docker uses: ./.github/workflows/build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime - image_name: zebrad-mining-rpcs-testnet - # TODO: change this to `-experimental` when we release Zebra `1.0.0` + image_name: zebra tag_suffix: .experimental - network: Testnet - rpc_port: '18232' - features: "sentry getblocktemplate-rpcs" - test_features: "" - checkpoint_sync: true - rust_backtrace: '1' - zebra_skip_ipv6_tests: '1' + features: "default-release-binaries getblocktemplate-rpcs" rust_log: info # This step needs access to Docker Hub secrets to run successfully secrets: inherit diff --git a/.github/workflows/release-crates-io.patch.yml 
b/.github/workflows/release-crates-io.patch.yml new file mode 100644 index 00000000000..e8f18d6c755 --- /dev/null +++ b/.github/workflows/release-crates-io.patch.yml @@ -0,0 +1,28 @@ +name: Release crates + +on: + # Only patch the Release PR test job + pull_request: + paths-ignore: + # code and tests + - '**/*.rs' + # hard-coded checkpoints (and proptest regressions, which are not actually needed) + - '**/*.txt' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # READMEs, which are shown on the crate page + - '**/README.md' + # workflow definitions + - '.github/workflows/release-crates.io.yml' + + +jobs: + check-release: + name: Check crate release dry run + runs-on: ubuntu-latest + steps: + - run: 'echo "No check required"' diff --git a/.github/workflows/release-crates-io.yml b/.github/workflows/release-crates-io.yml new file mode 100644 index 00000000000..cd0764b9041 --- /dev/null +++ b/.github/workflows/release-crates-io.yml @@ -0,0 +1,125 @@ +# This workflow checks that Zebra's crates.io release script works. +# +# We use a separate action, because the changed files are different to a Continuous Deployment +# or Docker release. +# +# This workflow is triggered when: +# - A PR that changes Rust files, a README, or this workflow is opened or updated +# - A change is pushed to the main branch +# +# TODO: +# If we decide to automate crates.io releases, we can also publish crates using this workflow, when: +# - A release is published +# - A pre-release is changed to a release + +name: Release crates + +# Ensures that only one workflow task will run at a time. Previous releases, if +# already in process, won't get cancelled. Instead, we let the first release complete, +# then queue the latest pending workflow, cancelling any workflows in between. +# +# Since the different event types do very different things (test vs release), +# we can run different event types concurrently. +# +# For pull requests, we only run the tests from this workflow, and don't do any releases. +# So an in-progress pull request gets cancelled, just like other tests. +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + + +on: +# disabled for now +# release: +# types: +# - released + + # Only runs the release tests, doesn't release any crates. + # + # We test all changes on the main branch, just in case the PR paths are too strict. + push: + branches: + - main + + pull_request: + paths: + # code and tests + - '**/*.rs' + # hard-coded checkpoints (and proptest regressions, which are not actually needed) + - '**/*.txt' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # READMEs, which are shown on the crate page + - '**/README.md' + # workflow definitions + - '.github/workflows/release-crates.io.yml' + + +jobs: + # Test that Zebra can be released to crates.io using `cargo`. + # This checks that Zebra's dependencies and release configs are correct. 
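The `check-release` job defined below runs the same `cargo release` sequence as the release checklist, so it can be reproduced locally. A condensed sketch, assuming `cargo-release` is installed and a throwaway branch is checked out (the `--execute` steps rewrite crate versions and create a commit):

```sh
# Condensed local approximation of the check-release job below.
# Run on a disposable branch: the --execute steps modify the working tree.
cargo install cargo-release

git config user.email "release-tests-no-reply@zfnd.org"
git config user.name "Automated Release Test"

# Bump every workspace crate except zebrad to the next beta version,
# then bump zebrad itself and commit the result.
cargo release version --verbose --execute --no-confirm --allow-branch '*' \
  --workspace --exclude zebrad beta
cargo release version --verbose --execute --no-confirm --allow-branch '*' \
  --package zebrad patch
cargo release replace --verbose --execute --no-confirm --allow-branch '*' \
  --package zebrad
cargo release commit --verbose --execute --no-confirm --allow-branch '*'

# Dry-run the publish: nothing is uploaded to crates.io.
cargo release publish --verbose --dry-run --allow-branch '*' \
  --workspace --exclude zebra-consensus --exclude zebra-utils --exclude zebrad
```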
+ check-release: + name: Check crate release dry run + timeout-minutes: 15 + runs-on: ubuntu-latest + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Checkout git repository + uses: actions/checkout@v3.5.3 + with: + persist-credentials: false + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Setup Rust with stable toolchain and minimal profile + - name: Setup Rust + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal + + - name: Install cargo-release + uses: baptiste0928/cargo-install@v2.1.0 + with: + crate: cargo-release + + # Make sure Zebra can be released! + # + # These steps should be kept up to date with the release checklist. + # + # TODO: move these steps into a script which is run in the release checklist and CI + - name: Crate release dry run + run: | + set -ex + git config --global user.email "release-tests-no-reply@zfnd.org" + git config --global user.name "Automated Release Test" + # This script must be the same as: + # https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md#update-crate-versions + # with an extra `--no-confirm` argument for non-interactive testing. + cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad beta + cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch + cargo release replace --verbose --execute --no-confirm --allow-branch '*' --package zebrad + cargo release commit --verbose --execute --no-confirm --allow-branch '*' + # Check the release will work using a dry run + # + # Workaround unpublished dependency version errors by skipping those crates: + # https://github.com/crate-ci/cargo-release/issues/691 + # + # TODO: check all crates after fixing these errors + cargo release publish --verbose --dry-run --allow-branch '*' --workspace --exclude zebra-consensus --exclude zebra-utils --exclude zebrad + + + # TODO: actually do the release here + #release-crates: + # name: Release Zebra Crates + # needs: [ check-release ] + # runs-on: ubuntu-latest + # timeout-minutes: 30 + # if: ${{ !cancelled() && !failure() && github.event_name == 'release' }} + # steps: diff --git a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/zcash-lightwalletd.yml index f651d5ce98e..96c7376e243 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/zcash-lightwalletd.yml @@ -56,13 +56,13 @@ jobs: id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: - repository: adityapk00/lightwalletd + repository: zcash/lightwalletd ref: 'master' persist-credentials: false - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: path: zebra persist-credentials: false @@ -75,7 +75,7 @@ jobs: # Automatic tag management and OCI Image Format Specification for labels - name: Docker meta id: meta - uses: docker/metadata-action@v4.4.0 + uses: docker/metadata-action@v4.6.0 with: # list of Docker images to use as base name for tags images: | @@ -121,7 +121,7 @@ jobs: uses: google-github-actions/setup-gcloud@v1.1.1 - name: Login to Google Artifact Registry - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v2.2.0 with: registry: us-docker.pkg.dev username: oauth2accesstoken @@ -130,7 +130,7 @@ jobs: # Build and push image to Google Artifact Registry - name: Build & push id: docker_build - uses: 
docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v4.1.1 with: target: build context: . diff --git a/.github/workflows/zcash-params.yml b/.github/workflows/zcash-params.yml index 4574ad1f6b8..28bcea9a424 100644 --- a/.github/workflows/zcash-params.yml +++ b/.github/workflows/zcash-params.yml @@ -9,16 +9,25 @@ concurrency: on: workflow_dispatch: + inputs: + no_cache: + description: 'Disable the Docker cache for this build' + required: false + type: boolean + default: false + push: branches: - 'main' paths: # parameter download code - 'zebra-consensus/src/primitives/groth16/params.rs' - - 'zebra-consensus/src/chain.rs' + - 'zebra-consensus/src/router.rs' + - 'zebrad/src/commands/download.rs' - 'zebrad/src/commands/start.rs' # workflow definitions - 'docker/zcash-params/Dockerfile' + - '.dockerignore' - '.github/workflows/zcash-params.yml' - '.github/workflows/build-docker-image.yml' @@ -30,7 +39,7 @@ jobs: dockerfile_path: ./docker/zcash-params/Dockerfile dockerfile_target: release image_name: zcash-params + no_cache: ${{ inputs.no_cache || false }} rust_backtrace: full rust_lib_backtrace: full - colorbt_show_hidden: '1' rust_log: info diff --git a/.github/workflows/zcashd-manual-deploy.yml b/.github/workflows/zcashd-manual-deploy.yml index e2d6eb004ee..142708504d1 100644 --- a/.github/workflows/zcashd-manual-deploy.yml +++ b/.github/workflows/zcashd-manual-deploy.yml @@ -22,7 +22,7 @@ jobs: id-token: 'write' steps: - - uses: actions/checkout@v3.5.2 + - uses: actions/checkout@v3.5.3 with: persist-credentials: false diff --git a/.gitignore b/.gitignore index 07b32ac0abe..ef29d45439f 100644 --- a/.gitignore +++ b/.gitignore @@ -63,7 +63,8 @@ flycheck_*.el ### Firebase ### .idea **/node_modules/* -**/.firebaserc +# We need to check in the .firebaserc file because it contains the target names +# **/.firebaserc ### Firebase Patch ### .runtimeconfig.json diff --git a/CHANGELOG.md b/CHANGELOG.md index ef19f83b3f0..16bf41b2694 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,200 @@ All notable changes to Zebra are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). +## [Zebra 1.1.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.1.0) - 2023-07-18 + + +This release adds new mempool metrics, fixes panics when cancelling tasks on shutdown, detects subcommand name typos on the command-line, and improves the usability of Zebra's Docker images (particularly for mining). + +### Breaking Changes + +- Zebra now detects subcommand name typos on the command-line. 
If you want to give Zebra a list of tracing filters, use `zebrad start --filters debug,...` ([#7056](https://github.com/ZcashFoundation/zebra/pull/7056)) + +### Security + +- Avoid initiating outbound handshakes with IPs for which Zebra already has an active peer ([#7029](https://github.com/ZcashFoundation/zebra/pull/7029)) +- Rate-limit inbound connections per IP ([#7041](https://github.com/ZcashFoundation/zebra/pull/7041)) + +### Added + +- Metrics tracking mempool actions and size bucketed by weight ([#7019](https://github.com/ZcashFoundation/zebra/pull/7019)) by @str4d +- Legacy state format compatibility layer and version bumps for ECC dependencies to match `zcashd` 5.6.0 ([#7053](https://github.com/ZcashFoundation/zebra/pull/7053)) +- Framework for upcoming in-place database format upgrades ([#7031](https://github.com/ZcashFoundation/zebra/pull/7031)) + + +### Changed + +- Deduplicate note commitment trees in non-finalized state ([#7218](https://github.com/ZcashFoundation/zebra/pull/7218), [#7239](https://github.com/ZcashFoundation/zebra/pull/7239)) + +### Fixed + +- Enable miners running Zebra with Docker to set their address for mining rewards ([#7178](https://github.com/ZcashFoundation/zebra/pull/7178)) +- Use default RPC port when running Zebra with Docker ([#7177](https://github.com/ZcashFoundation/zebra/pull/7177), [#7162](https://github.com/ZcashFoundation/zebra/pull/7162)) +- Stop panicking on async task cancellation on shutdown in network and state futures ([#7219](https://github.com/ZcashFoundation/zebra/pull/7219)) +- Remove redundant startup logs, fix progress bar number, order, and wording ([#7087](https://github.com/ZcashFoundation/zebra/pull/7087)) +- Organize Docker `ENV` and `ARG` values based on their usage ([#7200](https://github.com/ZcashFoundation/zebra/pull/7200)) +- Avoid blocking threads by awaiting proof verification results from rayon in async context ([#6887](https://github.com/ZcashFoundation/zebra/pull/6887)) + + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @gustavovalverde, @mpguerra, @oxarbitrage, @str4d, @teor2345 and @upbqdn + + +## [Zebra 1.0.1](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.1) - 2023-07-03 + +Zebra's first patch release fixes multiple peer connection security issues and panics. It also significantly reduces Zebra's CPU usage. We recommend that all users upgrade to Zebra 1.0.1 or later. + +As of this release, Zebra requires Rust 1.70 to build. macOS builds are no longer officially supported by the Zebra team. + +If you're running `zebrad` in a terminal, you'll see a new Zebra welcome message. + +Please report bugs to [the Zebra GitHub repository](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=) + +### Breaking Changes + +This release has the following breaking changes: +- Zebra limits each IP address to 1 peer connection, to prevent denial of service attacks. This can be changed using the `network.max_connections_per_ip` config. ([#6980](https://github.com/ZcashFoundation/zebra/pull/6980), [#6993](https://github.com/ZcashFoundation/zebra/pull/6993), [#7013](https://github.com/ZcashFoundation/zebra/pull/7013)). + Thank you to @dimxy from komodo for reporting this bug, and the Ziggurat team for demonstrating + its impact on testnet. 
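  For reference, the `network.max_connections_per_ip` setting mentioned in the item above is an ordinary key in the `[network]` table of Zebra's TOML config. An illustrative sketch only; the config file name and the value `4` are placeholders, not recommendations:

```sh
# Illustrative only: raise the per-IP peer connection limit above its
# new default of 1. The config file name here is a placeholder.
cat >> zebrad.toml <<'EOF'
[network]
max_connections_per_ip = 4
EOF

# Point zebrad at the edited config.
zebrad -c zebrad.toml start
```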
+- Zebra uses new APIs in Rust 1.70 to prevent concurrency bugs that could cause hangs or panics + ([#7032](https://github.com/ZcashFoundation/zebra/pull/7032)). + +### Support Changes + +These platforms are no longer supported by the Zebra team: +- macOS has been moved from tier 2 to [tier 3 support](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/supported-platforms.md#tier-3) ([#6965](https://github.com/ZcashFoundation/zebra/pull/6965)). We disabled our regular macOS builds because Rust 1.70 [causes crashes during shutdown on macOS x86_64 (#6812)](https://github.com/ZcashFoundation/zebra/issues/6812). Zebra's state uses database transactions, so it should not be corrupted by the crash. + +### Security + +- Use Arc::into\_inner() to avoid potential hangs or panics ([#7032](https://github.com/ZcashFoundation/zebra/pull/7032)) +- Replace openssl with rustls in tests and experimental features ([#7047](https://github.com/ZcashFoundation/zebra/pull/7047)) + +#### Network Security + +- Fix long delays in accepting inbound handshakes, and delays in async operations throughout Zebra ([#7103](https://github.com/ZcashFoundation/zebra/pull/7103)). Thank you to the Ziggurat Team for reporting this bug. +- Limit each IP address to 1 peer connection, to prevent denial of service attacks ([#6980](https://github.com/ZcashFoundation/zebra/pull/6980), [#6993](https://github.com/ZcashFoundation/zebra/pull/6993)) +- Close new peer connections from the same IP and port, rather than replacing the older connection ([#6980](https://github.com/ZcashFoundation/zebra/pull/6980)) +- Reduce inbound service overloads and add a timeout ([#6950](https://github.com/ZcashFoundation/zebra/pull/6950)) +- Stop panicking when handling inbound connection handshakes ([#6984](https://github.com/ZcashFoundation/zebra/pull/6984)) +- Stop panicking on shutdown in the syncer and network ([#7104](https://github.com/ZcashFoundation/zebra/pull/7104)) + +### Added + +- Make the maximum number of connections per IP configurable ([#7013](https://github.com/ZcashFoundation/zebra/pull/7013)) +- Make it easier to modify Zebra's config inside the Docker image ([#7045](https://github.com/ZcashFoundation/zebra/pull/7045)) +- Print a Zebra logo and welcome text if stderr is a terminal ([#6945](https://github.com/ZcashFoundation/zebra/pull/6945), [#7075](https://github.com/ZcashFoundation/zebra/pull/7075), [#7095](https://github.com/ZcashFoundation/zebra/pull/7095), [#7102](https://github.com/ZcashFoundation/zebra/pull/7102)) + +### Changed + +- Move macOS to tier 3 support ([#6965](https://github.com/ZcashFoundation/zebra/pull/6965)) +- Install from crates.io in the README, rather than a git release tag ([#6977](https://github.com/ZcashFoundation/zebra/pull/6977)) +- Add extra timeout logging to peer TCP connections ([#6969](https://github.com/ZcashFoundation/zebra/pull/6969)) + +### Fixed + +- Stop overwriting custom user configs inside Zebra's Docker image ([#7045](https://github.com/ZcashFoundation/zebra/pull/7045)) +- Stop Zebra using 100% CPU even when idle ([#7103](https://github.com/ZcashFoundation/zebra/pull/7103)), thank you to james_katz for reporting this bug +- Avoid potential hangs in the `tokio` async runtime ([#7094](https://github.com/ZcashFoundation/zebra/pull/7094)) +- Replace or add RPC content type header to support `zcashd` RPC examples ([#6885](https://github.com/ZcashFoundation/zebra/pull/6885)) +- Make `zebra-network` licensing clearer ([#6995](https://github.com/ZcashFoundation/zebra/pull/6995)) + +####
Configuration + +- Ignore error from loading config if running the 'generate' or 'download' commands ([#7014](https://github.com/ZcashFoundation/zebra/pull/7014)) +- Apply force\_color to panic logs ([#6997](https://github.com/ZcashFoundation/zebra/pull/6997)) + +#### Logging & Error Handling + +- Log a zebra-network task cancel on shutdown, rather than panicking ([#7078](https://github.com/ZcashFoundation/zebra/pull/7078)) +- Fix incorrect function spans in some logs ([#6923](https://github.com/ZcashFoundation/zebra/pull/6923), [#6995](https://github.com/ZcashFoundation/zebra/pull/6995)) +- Replace a state validation chain length assertion with a NotReadyToBeCommitted error ([#7072](https://github.com/ZcashFoundation/zebra/pull/7072)) + +#### Experimental Feature Fixes + +- Add an elasticsearch feature to block serialize to fix experimental build failures ([#6709](https://github.com/ZcashFoundation/zebra/pull/6709)) +- Prevent progress bar from panicking by disabling limits that are never reached ([#6940](https://github.com/ZcashFoundation/zebra/pull/6940)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @conradoplg, @dconnolly, @dimxy from komodo, james_katz, @oxarbitrage, @teor2345, @upbqdn, and the Ziggurat team. + + +## [Zebra 1.0.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0) - 2023-06-14 + +This is our 1.0.0 stable release. + +This release also fixes a panic at startup when parsing the app version, [publishes `zebrad` to crates.io](https://crates.io/crates/zebrad), and [publishes to Docker Hub under the `latest` tag](https://hub.docker.com/r/zfnd/zebra/tags). + +Please report bugs to [the Zebra GitHub repository](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=) + +### Security + +- Avoid potential concurrency bugs in outbound handshakes ([#6869](https://github.com/ZcashFoundation/zebra/pull/6869)) + +### Changed + +- Publish to [crates.io](https://crates.io/crates/zebrad) ([#6908](https://github.com/ZcashFoundation/zebra/pull/6908)) +- Rename tower-batch to tower-batch-control ([#6907](https://github.com/ZcashFoundation/zebra/pull/6907)) +- Upgrade to ed25519-zebra 4.0.0 ([#6881](https://github.com/ZcashFoundation/zebra/pull/6881)) + +### Fixed + +- Stop panicking at startup when parsing the app version ([#6888](https://github.com/ZcashFoundation/zebra/pull/6888)) +- Avoid a race condition in testing modified JoinSplits ([#6921](https://github.com/ZcashFoundation/zebra/pull/6921)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@dconnolly, @gustavovalverde, @oxarbitrage, @teor2345 and @upbqdn + + +## [Zebra 1.0.0-rc.9](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-rc.9) - 2023-06-07 + +This release continues to address audit findings. It fixes multiple network protocol and RPC bugs, +and reduces sensitive information logging. + +This is the last release candidate before the 1.0.0 stable release. 
Please report bugs to [the Zebra GitHub repository](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=) + +### Breaking Changes + +- The version subcommand has been replaced with a --version/-V flag ([#6801](https://github.com/ZcashFoundation/zebra/pull/6801)) + +### Security + +- Stop logging peer IP addresses, to protect user privacy ([#6662](https://github.com/ZcashFoundation/zebra/pull/6662)) +- Stop logging potentially sensitive user information from unmined transactions ([#6616](https://github.com/ZcashFoundation/zebra/pull/6616)) +- Rate-limit MetaAddrChange::Responded from peers ([#6738](https://github.com/ZcashFoundation/zebra/pull/6738)) +- Ignore out of order Address Book changes, unless they are concurrent ([#6717](https://github.com/ZcashFoundation/zebra/pull/6717)) +- Limit blocks and transactions sent in response to a single request ([#6679](https://github.com/ZcashFoundation/zebra/pull/6679)) +- Rate-limit and size-limit peer transaction ID messages ([#6625](https://github.com/ZcashFoundation/zebra/pull/6625)) +- Stop panicking on state RPC or block requests with very large heights ([#6699](https://github.com/ZcashFoundation/zebra/pull/6699)) +- Try harder to drop connections when they shut down, Credit: Ziggurat Team ([#6832](https://github.com/ZcashFoundation/zebra/pull/6832)) +- Randomly drop connections when inbound service is overloaded ([#6790](https://github.com/ZcashFoundation/zebra/pull/6790)) + +### Added + +- Report compiler version and Zebra features when starting Zebra ([#6606](https://github.com/ZcashFoundation/zebra/pull/6606)) +- Update Zebra book summary to include supported platforms, platform tier policy, and versioning ([#6683](https://github.com/ZcashFoundation/zebra/pull/6683)) +- Improve zebrad's help output, credit to @Rqnsom ([#6801](https://github.com/ZcashFoundation/zebra/pull/6801)) +- Cache a list of useful peers on disk ([#6739](https://github.com/ZcashFoundation/zebra/pull/6739)) +- Make the first stable release forward-compatible with planned state changes ([#6813](https://github.com/ZcashFoundation/zebra/pull/6813)) + +### Fixed + +- Limit RPC failure log length, add details to RPC failure logs ([#6754](https://github.com/ZcashFoundation/zebra/pull/6754)) +- Allow inbound connections to Zebra running in Docker ([#6755](https://github.com/ZcashFoundation/zebra/pull/6755)) +- Zebra now accepts filters for the start command when no subcommand is provided ([#6801](https://github.com/ZcashFoundation/zebra/pull/6801)) +- Avoid panicking on state errors during shutdown ([#6828](https://github.com/ZcashFoundation/zebra/pull/6828)) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @mpguerra, @oxarbitrage, @teor2345 and @upbqdn + + ## [Zebra 1.0.0-rc.8](https://github.com/ZcashFoundation/zebra/releases/tag/v1.0.0-rc.8) - 2023-05-10 Starting in this release, Zebra has implemented an "end of support" halt. Just like `zcashd`, the `zebrad` binary will stop running 16 weeks after the last release date. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4788d60abd9..7d8ce532628 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,8 +14,7 @@ how to build, run, and instrument Zebra. ## Bug Reports [bug-reports]: #bug-reports -[File an issue](https://github.com/ZcashFoundation/zebra/issues/new/choose) -on the issue tracker using the bug report template. 
+Please [create an issue](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=) on the Zebra issue tracker. ## Pull Requests [pull-requests]: #pull-requests diff --git a/Cargo.lock b/Cargo.lock index acf7fedb717..84225549cd1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,51 +4,48 @@ version = 3 [[package]] name = "abscissa_core" -version = "0.5.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a07677093120a02583717b6dd1ef81d8de1e8d01bd226c83f0f9bdf3e56bb3a" +checksum = "8346a52bf3fb445d5949d144c37360ad2f1d7950cfcc6d4e9e4999b1cd1bd42a" dependencies = [ "abscissa_derive", + "arc-swap", "backtrace", "canonical-path", - "chrono", - "color-backtrace", - "generational-arena", - "gumdrop", - "libc", + "clap 4.3.21", + "color-eyre", + "fs-err", "once_cell", "regex", "secrecy", - "semver 0.9.0", + "semver 1.0.18", "serde", - "signal-hook", "termcolor", "toml 0.5.11", "tracing", "tracing-log", - "tracing-subscriber 0.1.6", + "tracing-subscriber", "wait-timeout", ] [[package]] name = "abscissa_derive" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f5722bc48763cb9d81d8427ca05b6aa2842f6632cf8e4c0a29eef9baececcc" +checksum = "55bfb86e57d13c06e482c570826ddcddcc8f07fab916760e8911141d4fda8b62" dependencies = [ - "darling 0.10.2", "ident_case", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "synstructure", ] [[package]] name = "addr2line" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" dependencies = [ "gimli", ] @@ -71,26 +68,15 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" +checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" dependencies = [ "cfg-if 1.0.0", "cipher", "cpufeatures", ] -[[package]] -name = "ahash" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" -dependencies = [ - "getrandom 0.2.9", - "once_cell", - "version_check", -] - [[package]] name = "ahash" version = "0.8.3" @@ -98,28 +84,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if 1.0.0", - "getrandom 0.2.9", + "getrandom 0.2.10", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.20" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] [[package]] -name = "aho-corasick" -version = "1.0.1" +name = "allocator-api2" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" -dependencies = [ - "memchr", -] +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + +[[package]] 
+name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" [[package]] name = "android_system_properties" @@ -138,27 +127,73 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "ansi_term" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ "winapi", ] [[package]] -name = "ansi_term" -version = "0.12.1" +name = "anstream" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" dependencies = [ - "winapi", + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" + +[[package]] +name = "anstyle-parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "anstyle-wincon" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +dependencies = [ + "anstyle", + "windows-sys 0.48.0", ] [[package]] name = "anyhow" -version = "1.0.71" +version = "1.0.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" + +[[package]] +name = "arc-swap" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "arrayref" @@ -168,15 +203,15 @@ checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-compression" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0122885821398cc923ece939e24d1056a2384ee719432397fa9db87230ff11" +checksum = "62b74f44609f0f91493e3082d3734d98497e094777144380ea4db9f9905dd5b6" dependencies = [ "flate2", "futures-core", @@ -202,20 +237,20 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.56", - "quote 
1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "async-trait" -version = "0.1.68" +version = "0.1.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" +checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -237,9 +272,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.18" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +checksum = "a6a1de45611fdb535bfde7b7de4fd54f4fd2b17b1737c0a59b69bf9b92074b8c" dependencies = [ "async-trait", "axum-core", @@ -282,15 +317,15 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ "addr2line", "cc", "cfg-if 1.0.0", "libc", - "miniz_oxide 0.6.2", + "miniz_oxide", "object", "rustc-demangle", ] @@ -309,9 +344,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" @@ -368,13 +403,13 @@ dependencies = [ "lazycell", "log", "peeking_take_while", - "prettyplease 0.2.4", - "proc-macro2 1.0.56", - "quote 1.0.27", + "prettyplease 0.2.10", + "proc-macro2 1.0.66", + "quote 1.0.31", "regex", "rustc-hash", "shlex", - "syn 2.0.15", + "syn 2.0.26", "which", ] @@ -387,7 +422,7 @@ dependencies = [ "hmac", "pbkdf2", "rand 0.8.5", - "sha2 0.10.6", + "sha2", "unicode-normalization", "zeroize", ] @@ -415,9 +450,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.1" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6776fc96284a0bb647b615056fc496d1fe1644a7ab01829818a6d91cae888b84" +checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" [[package]] name = "bitflags-serde-legacy" @@ -425,7 +460,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b64e60c28b6d25ad92e8b367801ff9aa12b41d05fc8798055d296bace4a60cc" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.3.3", "serde", ] @@ -463,15 +498,6 @@ dependencies = [ "constant_time_eq", ] -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -494,20 +520,30 @@ dependencies = [ "subtle", ] +[[package]] +name = "bridgetree" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a813dadc684e4c78a4547757debd99666282545d90e4ccc3210913ed4337ad2" 
+dependencies = [ + "incrementalmerkletree", +] + [[package]] name = "bs58" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" dependencies = [ - "sha2 0.9.9", + "sha2", + "tinyvec", ] [[package]] name = "bstr" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" +checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" dependencies = [ "memchr", "serde", @@ -515,9 +551,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.2" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -631,17 +667,14 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", - "js-sys", - "num-integer", "num-traits", "serde", - "time 0.1.45", - "wasm-bindgen", "winapi", ] @@ -700,55 +733,65 @@ version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ - "ansi_term 0.12.1", + "ansi_term", "atty", "bitflags 1.3.2", "strsim 0.8.0", - "textwrap 0.11.0", + "textwrap", "unicode-width", "vec_map", ] [[package]] name = "clap" -version = "3.2.25" +version = "4.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd" dependencies = [ - "bitflags 1.3.2", - "clap_lex", - "indexmap", - "textwrap 0.16.0", + "clap_builder", + "clap_derive", + "once_cell", ] [[package]] -name = "clap_lex" -version = "0.2.4" +name = "clap_builder" +version = "4.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa" dependencies = [ - "os_str_bytes", + "anstream", + "anstyle", + "clap_lex", + "once_cell", + "strsim 0.10.0", ] [[package]] -name = "codespan-reporting" -version = "0.11.1" +name = "clap_derive" +version = "4.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" dependencies = [ - "termcolor", - "unicode-width", + "heck 0.4.1", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] -name = "color-backtrace" -version = "0.3.0" +name = "clap_lex" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65d13f1078cc63c791d0deba0dd43db37c9ec02b311f10bed10b577016f3a957" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" + +[[package]] 
+name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" dependencies = [ - "atty", - "backtrace", "termcolor", + "unicode-width", ] [[package]] @@ -779,17 +822,23 @@ dependencies = [ "tracing-error", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "console" -version = "0.15.5" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60" +checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" dependencies = [ "encode_unicode", "lazy_static", "libc", "unicode-width", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -806,9 +855,9 @@ dependencies = [ [[package]] name = "console-subscriber" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ab2224a0311582eb03adba4caaf18644f7b1f10a760803a803b9b605187fc7" +checksum = "d4cf42660ac07fcebed809cfe561dd8730bcd35b075215e6479c516bcd0d11cb" dependencies = [ "console-api", "crossbeam-channel", @@ -825,24 +874,14 @@ dependencies = [ "tonic", "tracing", "tracing-core", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] name = "constant_time_eq" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b" - -[[package]] -name = "core-foundation" -version = "0.9.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" -dependencies = [ - "core-foundation-sys", - "libc", -] +checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" [[package]] name = "core-foundation-sys" @@ -852,9 +891,9 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] @@ -870,19 +909,19 @@ dependencies = [ [[package]] name = "criterion" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" dependencies = [ "anes", - "atty", "cast", "ciborium", - "clap 3.2.25", + "clap 4.3.21", "criterion-plot", - "itertools", - "lazy_static", + "is-terminal", + "itertools 0.10.5", "num-traits", + "once_cell", "oorandom", "plotters", "rayon", @@ -901,7 +940,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] @@ -927,9 +966,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if 1.0.0", @@ -940,9 +979,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if 1.0.0", ] @@ -965,96 +1004,72 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "4.0.0-rc.2" +version = "4.0.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" +checksum = "436ace70fc06e06f7f689d2624dc4e2f0ea666efb5aa704215f7249ae6e047a7" dependencies = [ "cfg-if 1.0.0", + "cpufeatures", + "curve25519-dalek-derive", + "digest", "fiat-crypto", - "packed_simd_2", "platforms", + "rustc_version 0.4.0", "serde", "subtle", "zeroize", ] [[package]] -name = "cxx" -version = "1.0.94" +name = "curve25519-dalek-derive" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] -name = "cxx-build" -version = "1.0.94" +name = "cxx" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" +checksum = "109308c20e8445959c2792e81871054c6a17e6976489a93d2769641a2ba5839c" dependencies = [ "cc", - "codespan-reporting", - "once_cell", - "proc-macro2 1.0.56", - "quote 1.0.27", - "scratch", - "syn 2.0.15", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", ] [[package]] name = "cxx-gen" -version = "0.7.94" +version = "0.7.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee165c38de64e6761c2f38b7e9beee0721110f8585165987ef9db2a753ee4176" +checksum = "400bb5c322e41b40e0014270ed5759b377eab9cb5c8754d82342548c6a719483" dependencies = [ "codespan-reporting", - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "cxxbridge-flags" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" +checksum = "882074421238e84fe3b4c65d0081de34e5b323bf64555d3e61991f76eb64a7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" +checksum = "4a076022ece33e7686fb76513518e219cca4fce5750a8ae6d1ce6c0f48fd1af9" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", -] - -[[package]] -name = "darling" 
-version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d706e75d87e35569db781a9b5e2416cff1236a47ed380831f959382ccd5f858" -dependencies = [ - "darling_core 0.10.2", - "darling_macro 0.10.2", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -1069,26 +1084,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" -dependencies = [ - "darling_core 0.20.1", - "darling_macro 0.20.1", -] - -[[package]] -name = "darling_core" -version = "0.10.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" +checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" dependencies = [ - "fnv", - "ident_case", - "proc-macro2 1.0.56", - "quote 1.0.27", - "strsim 0.9.3", - "syn 1.0.109", + "darling_core 0.20.3", + "darling_macro 0.20.3", ] [[package]] @@ -1099,35 +1100,24 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "strsim 0.10.0", "syn 1.0.109", ] [[package]] name = "darling_core" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" +checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "strsim 0.10.0", - "syn 2.0.15", -] - -[[package]] -name = "darling_macro" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" -dependencies = [ - "darling_core 0.10.2", - "quote 1.0.27", - "syn 1.0.109", + "syn 2.0.26", ] [[package]] @@ -1137,19 +1127,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", - "quote 1.0.27", + "quote 1.0.31", "syn 1.0.109", ] [[package]] name = "darling_macro" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" +checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ - "darling_core 0.20.1", - "quote 1.0.27", - "syn 2.0.15", + "darling_core 0.20.3", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -1164,33 +1154,15 @@ dependencies = [ [[package]] name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - -[[package]] -name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "crypto-common", "subtle", ] -[[package]] -name = "directories" -version = "5.0.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" -dependencies = [ - "dirs-sys", -] - [[package]] name = "dirs" version = "5.0.1" @@ -1218,29 +1190,40 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "dyn-clone" -version = "1.0.11" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "304e6508efa593091e97a9abbc10f90aa7ca635b6d2784feff3c89d41dd12272" + +[[package]] +name = "ed25519" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" +checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963" +dependencies = [ + "serde", + "signature", +] [[package]] name = "ed25519-zebra" -version = "3.1.0" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c24f403d068ad0b359e577a77f92392118be3f3c927538f2bb544a5ecd828c6" +checksum = "ffb0d653b2c06ec7ec1b4c570bb4eac748035d6f44dd14e5fd9e7e2549938488" dependencies = [ - "curve25519-dalek 3.2.0", - "hashbrown 0.12.3", + "curve25519-dalek", + "ed25519", + "hashbrown 0.14.0", "hex", "rand_core 0.6.4", "serde", - "sha2 0.9.9", + "sha2", "zeroize", ] @@ -1305,6 +1288,12 @@ dependencies = [ "byteorder", ] +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + [[package]] name = "errno" version = "0.3.1" @@ -1347,12 +1336,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.9.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" [[package]] name = "ff" @@ -1396,7 +1382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", - "miniz_oxide 0.7.1", + "miniz_oxide", ] [[package]] @@ -1418,26 +1404,11 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -1450,12 +1421,18 @@ checksum = 
"26c4b37de5ae15812a764c958297cfc50f5c010438f60c6ce75d11b802abd404" dependencies = [ "cbc", "cipher", - "libm 0.2.6", + "libm", "num-bigint", "num-integer", "num-traits", ] +[[package]] +name = "fs-err" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" + [[package]] name = "funty" version = "2.0.0" @@ -1516,9 +1493,9 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -1551,15 +1528,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generational-arena" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d3b771574f62d0548cee0ad9057857e9fc25d7a3335f140c84f6acd0bf601" -dependencies = [ - "cfg-if 0.1.10", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -1583,9 +1551,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -1596,15 +1564,15 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "git2" -version = "0.17.1" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7905cdfe33d31a88bb2e8419ddd054451f5432d1da9eaf2ac7804ee1ea12d5" +checksum = "7b989d6a7ca95a362cf2cfc5ad688b3a467be1f87e480b8dad07fee8c79b0044" dependencies = [ "bitflags 1.3.2", "libc", @@ -1621,11 +1589,11 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" +checksum = "1391ab1f92ffcc08911957149833e682aa3fe252b9f45f966d2ef972274c97df" dependencies = [ - "aho-corasick 0.7.20", + "aho-corasick", "bstr", "fnv", "log", @@ -1644,31 +1612,11 @@ dependencies = [ "subtle", ] -[[package]] -name = "gumdrop" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee50908bc1beeac1f2902e0b4e0cd0d844e716f5ebdc6f0cfc1163fe5e10bcde" -dependencies = [ - "gumdrop_derive", -] - -[[package]] -name = "gumdrop_derive" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90454ce4de40b7ca6a8968b5ef367bdab48413962588d0d2b1638d60090c35d7" -dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 1.0.109", -] - [[package]] name = "h2" -version = "0.3.18" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" +checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" dependencies = [ "bytes", "fnv", @@ -1676,7 +1624,7 @@ dependencies = [ 
"futures-sink", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "slab", "tokio", "tokio-util 0.7.8", @@ -1734,17 +1682,24 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" dependencies = [ - "ahash 0.7.6", + "ahash", ] [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" dependencies = [ - "ahash 0.8.3", + "ahash", + "allocator-api2", ] [[package]] @@ -1762,14 +1717,15 @@ dependencies = [ [[package]] name = "hdwallet" -version = "0.3.1" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd89bf343be18dbe1e505100e48168bbd084760e842a8fed0317d2361470193" +checksum = "5a03ba7d4c9ea41552cd4351965ff96883e629693ae85005c501bb4b9e1c48a7" dependencies = [ "lazy_static", "rand_core 0.6.4", "ring", "secp256k1", + "thiserror", ] [[package]] @@ -1798,18 +1754,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -1826,7 +1773,16 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest", +] + +[[package]] +name = "home" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys 0.48.0", ] [[package]] @@ -1903,9 +1859,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.26" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", @@ -1927,13 +1883,14 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" dependencies = [ + "futures-util", "http", "hyper", - "rustls 0.21.1", + "rustls 0.21.5", "tokio", "tokio-rustls", ] @@ -1950,24 +1907,11 @@ dependencies = [ "tokio-io-timeout", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - 
"bytes", - "hyper", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "iana-time-zone" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1979,12 +1923,11 @@ dependencies = [ [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -1995,9 +1938,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -2018,18 +1961,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] [[package]] name = "incrementalmerkletree" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5ad43a3f5795945459d577f6589cf62a476e92c79b75e70cd954364e14ce17b" +checksum = "2eb91780c91bfc79769006a55c49127b83e1c1a6cf2b3b149ce3f247cbe342f0" dependencies = [ - "serde", + "either", ] [[package]] @@ -2049,15 +1992,27 @@ dependencies = [ "serde", ] +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", + "serde", +] + [[package]] name = "indicatif" -version = "0.17.3" +version = "0.17.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef509aa9bc73864d6756f0d34d35504af3cf0844373afe9b8669a5b8005a729" +checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730" dependencies = [ "console", + "instant", "number_prefix", - "portable-atomic 0.3.20", + "portable-atomic", "unicode-width", ] @@ -2067,7 +2022,7 @@ version = "0.11.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fb7c1b80a1dfa604bb4a649a5c5aeef3d913f7c520cb42b40e534e8a61bcdfc" dependencies = [ - "ahash 0.8.3", + "ahash", "is-terminal", "itoa", "log", @@ -2089,9 +2044,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.29.0" +version = "1.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a28d25139df397cbca21408bb742cf6837e04cdbebf1b07b760caf971d6a972" +checksum = "a0770b0a3d4c70567f0d58331f3088b0e4c4f56c9b8d764efe654b4a5d46de3a" dependencies = [ "console", "lazy_static", @@ -2113,31 +2068,19 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "io-lifetimes" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" -dependencies = [ - "hermit-abi 0.3.1", - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "ipnet" -version = "2.7.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "is-terminal" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.1", - "io-lifetimes", + "hermit-abi 0.3.2", "rustix", "windows-sys 0.48.0", ] @@ -2151,11 +2094,20 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jobserver" @@ -2168,9 +2120,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.62" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68c16e1bfd491478ab155fd8b4896b86f9ede344949b641e61501e07c2b8b4d5" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -2197,8 +2149,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -2250,6 +2202,15 @@ dependencies = [ "subtle", ] +[[package]] +name = "known-folders" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b6f1427d9c43b1cce87434c4d9eca33f43bdbb6246a762aa823a582f74c1684" +dependencies = [ + "windows-sys 0.48.0", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -2267,15 +2228,15 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.144" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libgit2-sys" -version = "0.15.1+1.6.4" +version = "0.15.2+1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4577bde8cdfc7d6a2a4bcb7b049598597de33ffd337276e9c7db6cd4a2cee7" +checksum = "a80df2e11fb4a61f4ba2ab42dbe7f74468da143f1a75c74e11dee7c813f694fa" dependencies = [ "cc", "libc", @@ -2295,15 +2256,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" - -[[package]] -name = "libm" -version = "0.2.6" +version = "0.2.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "librocksdb-sys" @@ -2334,9 +2289,9 @@ dependencies = [ [[package]] name = "link-cplusplus" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" +checksum = "9d240c6f7e1ba3a28b0249f774e6a9dd0175054b52dfbb61b16eb8505c3785c9" dependencies = [ "cc", ] @@ -2349,15 +2304,15 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.3.7" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f" +checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -2365,12 +2320,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "lz4-sys" @@ -2397,22 +2349,13 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" -[[package]] -name = "matchers" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" -dependencies = [ - "regex-automata", -] - [[package]] name = "matchers" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] [[package]] @@ -2431,12 +2374,6 @@ dependencies = [ "rayon", ] -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "memchr" version = "2.5.0" @@ -2445,9 +2382,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] @@ -2463,24 +2400,13 @@ dependencies = [ [[package]] name = "metrics" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849" -dependencies = [ - "ahash 0.7.6", - "metrics-macros 0.6.0", - "portable-atomic 0.3.20", -] - -[[package]] -name = "metrics" -version = "0.21.0" +version = "0.21.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa8ebbd1a9e57bbab77b9facae7f5136aea44c356943bf9a198f647da64285d6" +checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" dependencies = [ - "ahash 0.8.3", - "metrics-macros 0.7.0", - "portable-atomic 1.3.2", + "ahash", + "metrics-macros", + "portable-atomic", ] [[package]] @@ -2489,49 +2415,38 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "hyper", - "indexmap", + "indexmap 1.9.3", "ipnet", - "metrics 0.21.0", + "metrics", "metrics-util", "quanta", "thiserror", "tokio", ] -[[package]] -name = "metrics-macros" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3" -dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 1.0.109", -] - [[package]] name = "metrics-macros" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "metrics-util" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111cb375987443c3de8d503580b536f77dc8416d32db62d9456db5d93bd7ac47" +checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.13.2", - "metrics 0.21.0", + "hashbrown 0.13.1", + "metrics", "num_cpus", "quanta", "sketches-ddsketch", @@ -2549,15 +2464,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.7.1" @@ -2569,27 +2475,26 @@ dependencies = [ [[package]] name = "minreq" -version = "2.7.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41979ac2a5aa373c6e294b4a67fbe5e428e91a4cd0524376681f2bc6d872399b" +checksum = "3de406eeb24aba36ed3829532fa01649129677186b44a49debec0ec574ca7da7" dependencies = [ "log", "once_cell", "rustls 0.20.8", "webpki", - "webpki-roots", + "webpki-roots 0.22.6", ] [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2610,32 +2515,14 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.9", -] - -[[package]] -name = "native-tls" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", + "getrandom 0.2.10", ] [[package]] name = "net2" -version = "0.2.38" +version = "0.2.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d0df99cfcd2530b2e694f6e17e7f37b8e26bb23983ac530c0c97408837c631" +checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" dependencies = [ "cfg-if 0.1.10", "libc", @@ -2706,16 +2593,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", - "libm 0.2.6", + "libm", ] [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.2", + "libc", +] + +[[package]] +name = "num_threads" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ - "hermit-abi 0.2.6", "libc", ] @@ -2727,18 +2623,18 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.30.3" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "oorandom" @@ -2752,50 +2648,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl" -version = "0.10.52" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56" -dependencies = [ - "bitflags 1.3.2", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "option-ext" version = "0.2.0" @@ -2804,9 +2656,9 @@ checksum = 
"04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchard" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6f418f2c25573923f81a091f38b4b19bc20f6c92b5070fb8f0711e64a2b998" +checksum = "5f4e7a52f510cb8c39e639e662a353adbaf86025478af89ae54a0551f8ca35e2" dependencies = [ "aes", "bitvec", @@ -2851,26 +2703,11 @@ dependencies = [ "winapi", ] -[[package]] -name = "os_str_bytes" -version = "6.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" - [[package]] name = "overload" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - -[[package]] -name = "owning_ref" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" -dependencies = [ - "stable_deref_trait", -] +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "owo-colors" @@ -2878,16 +2715,6 @@ version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" -[[package]] -name = "packed_simd_2" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" -dependencies = [ - "cfg-if 1.0.0", - "libm 0.1.4", -] - [[package]] name = "pairing" version = "0.23.0" @@ -2899,9 +2726,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.5.0" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" +checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" dependencies = [ "arrayvec", "bitvec", @@ -2913,13 +2740,13 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.4" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" +checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -2941,7 +2768,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.7", + "parking_lot_core 0.9.8", ] [[package]] @@ -2954,21 +2781,21 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec 1.10.0", + "smallvec", "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.2.16", - "smallvec 1.10.0", - "windows-sys 0.45.0", + "redox_syscall 0.3.5", + "smallvec", + "windows-targets 0.48.1", ] [[package]] @@ -3003,7 +2830,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" dependencies = [ - "digest 0.10.6", + "digest", "password-hash", ] @@ -3015,15 +2842,15 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" +checksum = "0d2d1d55045829d65aad9d389139882ad623b33b904e7c9f1b10c5b8927298e5" dependencies = [ "thiserror", "ucd-trie", @@ -3031,9 +2858,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb" +checksum = "5f94bca7e7a599d89dea5dfa309e217e7906c3c007fb9c3299c40b10d6a315d3" dependencies = [ "pest", "pest_generator", @@ -3041,26 +2868,26 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" +checksum = "99d490fe7e8556575ff6911e45567ab95e71617f43781e5c05490dc8d75c965c" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "pest_meta" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" +checksum = "2674c66ebb4b4d9036012091b537aae5878970d6999f81a265034d85b136b341" dependencies = [ "once_cell", "pest", - "sha2 0.10.6", + "sha2", ] [[package]] @@ -3070,34 +2897,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 1.9.3", ] [[package]] name = "pin-project" -version = "1.1.0" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.0" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" [[package]] name = "pin-utils" @@ -3119,9 +2946,9 @@ checksum = 
"e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" [[package]] name = "plotters" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" dependencies = [ "num-traits", "plotters-backend", @@ -3132,15 +2959,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" [[package]] name = "plotters-svg" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" dependencies = [ "plotters-backend", ] @@ -3158,18 +2985,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" -dependencies = [ - "portable-atomic 1.3.2", -] - -[[package]] -name = "portable-atomic" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc59d1bcc64fc5d021d67521f818db868368028108d37f0e98d74e33f68297b5" +checksum = "edc55135a600d700580e406b4de0d59cb9ad25e344a3a091a97ded2622ec4ec6" [[package]] name = "ppv-lite86" @@ -3183,18 +3001,18 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.56", + "proc-macro2 1.0.66", "syn 1.0.109", ] [[package]] name = "prettyplease" -version = "0.2.4" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" +checksum = "92139198957b410250d43fad93e630d956499a625c527eda65175c8680f83387" dependencies = [ - "proc-macro2 1.0.56", - "syn 2.0.15", + "proc-macro2 1.0.66", + "syn 2.0.26", ] [[package]] @@ -3234,8 +3052,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "version_check", ] @@ -3246,8 +3064,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "version_check", ] @@ -3262,25 +3080,24 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] [[package]] name = "proptest" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"29f1b898011ce9595050a68e60f90bad083ff2987a695a42357134c8381fba70" +checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" dependencies = [ "bit-set", "bitflags 1.3.2", "byteorder", "lazy_static", "num-traits", - "quick-error 2.0.1", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", @@ -3319,7 +3136,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck 0.4.1", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -3340,9 +3157,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", - "proc-macro2 1.0.56", - "quote 1.0.27", + "itertools 0.10.5", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3357,9 +3174,9 @@ dependencies = [ [[package]] name = "quanta" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc73c42f9314c4bdce450c77e6f09ecbddefbeddb1b5979ded332a3913ded33" +checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" dependencies = [ "crossbeam-utils", "libc", @@ -3377,12 +3194,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quick-xml" version = "0.26.0" @@ -3410,8 +3221,8 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3426,11 +3237,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.27" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0" dependencies = [ - "proc-macro2 1.0.56", + "proc-macro2 1.0.66", ] [[package]] @@ -3498,7 +3309,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -3552,9 +3363,9 @@ dependencies = [ [[package]] name = "reddsa" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b34d2c0df43159d2ff79d3cf929c9f11415529127344edb8160ad2be499fcd" +checksum = "78a5191930e84973293aa5f532b513404460cd2216c1cfb76d08748c15b40b02" dependencies = [ "blake2b_simd", "byteorder", @@ -3605,20 +3416,21 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.8.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = 
"81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ - "aho-corasick 1.0.1", + "aho-corasick", "memchr", - "regex-syntax 0.7.1", + "regex-automata 0.3.6", + "regex-syntax 0.7.4", ] [[package]] @@ -3630,6 +3442,17 @@ dependencies = [ "regex-syntax 0.6.29", ] +[[package]] +name = "regex-automata" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.7.4", +] + [[package]] name = "regex-syntax" version = "0.6.29" @@ -3638,9 +3461,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "reqwest" @@ -3649,7 +3472,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ "async-compression", - "base64 0.21.0", + "base64 0.21.2", "bytes", "encoding_rs", "futures-core", @@ -3659,22 +3482,19 @@ dependencies = [ "http-body", "hyper", "hyper-rustls", - "hyper-tls", "ipnet", "js-sys", "log", "mime", - "native-tls", "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.1", + "rustls 0.21.5", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-native-tls", "tokio-rustls", "tokio-util 0.7.8", "tower-service", @@ -3682,7 +3502,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", + "webpki-roots 0.22.6", "winreg", ] @@ -3716,14 +3536,14 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" dependencies = [ - "digest 0.10.6", + "digest", ] [[package]] name = "rlimit" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8a29d87a652dc4d43c586328706bb5cdff211f3f39a530f240b53f7221dab8e" +checksum = "3560f70f30a0f16d11d01ed078a07740fe6b489667abc7c7b029155d9f21c3d8" dependencies = [ "libc", ] @@ -3782,18 +3602,17 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.17", + "semver 1.0.18", ] [[package]] name = "rustix" -version = "0.37.19" +version = "0.38.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" +checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.3.3", "errno", - "io-lifetimes", "libc", "linux-raw-sys", "windows-sys 0.48.0", @@ -3813,23 +3632,23 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.1" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" dependencies = [ "log", "ring", - "rustls-webpki", + "rustls-webpki 0.101.1", "sct", ] [[package]] name = "rustls-pemfile" -version = "1.0.2" +version = 
"1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", ] [[package]] @@ -3842,11 +3661,21 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustls-webpki" +version = "0.101.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15f36a6828982f422756984e47912a7a51dcbc2a197aa791158f8ca61cd8204e" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustversion" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "rusty-fork" @@ -3855,16 +3684,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error 1.2.3", + "quick-error", "tempfile", "wait-timeout", ] [[package]] name = "ryu" -version = "1.0.13" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "same-file" @@ -3875,26 +3704,11 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "schannel" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" -dependencies = [ - "windows-sys 0.42.0", -] - [[package]] name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "scratch" -version = "1.0.5" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" @@ -3908,9 +3722,9 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.21.3" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" +checksum = "4124a35fe33ae14259c490fd70fa199a32b9ce9502f2ee6bc4f81ec06fa65894" dependencies = [ "secp256k1-sys", "serde", @@ -3918,46 +3732,23 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.4.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" +checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" dependencies = [ "cc", ] [[package]] name = "secrecy" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9182278ed645df3477a9c27bfee0621c621aa16f6972635f7f795dae3d81070f" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ "serde", "zeroize", ] -[[package]] -name = "security-framework" -version = "2.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "semver" version = "0.9.0" @@ -3965,14 +3756,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ "semver-parser", - "serde", ] [[package]] name = "semver" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +dependencies = [ + "serde", +] [[package]] name = "semver-parser" @@ -3982,27 +3775,27 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "sentry" -version = "0.31.0" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c3d7f8bf7373e75222452fcdd9347d857452a92d0eec738f941bc4656c5b5df" +checksum = "01b0ad16faa5d12372f914ed40d00bda21a6d1bdcc99264c5e5e1c9495cf3654" dependencies = [ "httpdate", "reqwest", - "rustls 0.20.8", + "rustls 0.21.5", "sentry-backtrace", "sentry-contexts", "sentry-core", "sentry-tracing", "tokio", "ureq", - "webpki-roots", + "webpki-roots 0.22.6", ] [[package]] name = "sentry-backtrace" -version = "0.31.0" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b7cdefbdca51f1146f0f24a3cb4ecb6428951f030ff5c720cfb5c60bd174c0" +checksum = "11f2ee8f147bb5f22ac59b5c35754a759b9a6f6722402e2a14750b2a63fc59bd" dependencies = [ "backtrace", "once_cell", @@ -4012,9 +3805,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.31.0" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6af4cb29066e0e8df0cc3111211eb93543ccb09e1ccbe71de6d88b4bb459a2b1" +checksum = "dcd133362c745151eeba0ac61e3ba8350f034e9fe7509877d08059fe1d7720c6" dependencies = [ "hostname", "libc", @@ -4026,9 +3819,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.31.0" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e781b55761e47a60d1ff326ae8059de22b0e6b0cee68eab1c5912e4fb199a76" +checksum = "7163491708804a74446642ff2c80b3acd668d4b9e9f497f85621f3d250fd012b" dependencies = [ "once_cell", "rand 0.8.5", @@ -4039,37 +3832,38 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.31.0" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4692bfc5bf69a8a41ccb0ce96612686eddb2406e32f7113f536efa15949af8" +checksum = "5aca8b88978677a27ee1a91beafe4052306c474c06f582321fde72d2e2cc2f7f" dependencies = [ + "sentry-backtrace", "sentry-core", "tracing-core", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] name = "sentry-types" -version = "0.31.0" +version = "0.31.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d642a04657cc77d8de52ae7c6d93a15cb02284eb219344a89c1e2b26bbaf578c" +checksum = "9e7a88e0c1922d19b3efee12a8215f6a8a806e442e665ada71cc222cab72985f" 
dependencies = [ "debugid", - "getrandom 0.2.9", + "getrandom 0.2.10", "hex", "serde", "serde_json", "thiserror", - "time 0.3.21", + "time", "url", "uuid", ] [[package]] name = "serde" -version = "1.0.163" +version = "1.0.179" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" +checksum = "0a5bf42b8d227d4abf38a1ddb08602e229108a517cd4e5bb28f9c7eaafdce5c0" dependencies = [ "serde_derive", ] @@ -4085,22 +3879,22 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.163" +version = "1.0.179" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" +checksum = "741e124f5485c7e60c03b043f79f320bff3527f4bbf12cf3831750dc46a0ec2c" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "serde_json" -version = "1.0.96" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" dependencies = [ - "indexmap", + "indexmap 2.0.0", "itoa", "ryu", "serde", @@ -4108,9 +3902,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" dependencies = [ "serde", ] @@ -4139,18 +3933,19 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.0.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02d8aa6e3c385bf084924f660ce2a3a6bd333ba55b35e8590b321f35d88513" +checksum = "1402f54f9a3b9e2efe71c1cea24e648acce55887983553eeb858cf3115acfd49" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "chrono", "hex", - "indexmap", + "indexmap 1.9.3", + "indexmap 2.0.0", "serde", "serde_json", - "serde_with_macros 3.0.0", - "time 0.3.21", + "serde_with_macros 3.2.0", + "time", ] [[package]] @@ -4160,45 +3955,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] [[package]] name = "serde_with_macros" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc7d5d3932fb12ce722ee5e64dd38c504efba37567f0c402f6ca728c3b8b070" -dependencies = [ - "darling 0.20.1", - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", -] - -[[package]] -name = "sha2" -version = "0.9.9" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +checksum = "9197f1ad0e3c173a0222d3c4404fb04c3afe87e962bcb327af73e8301fa203c7" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "darling 0.20.3", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.6", + "digest", ] [[package]] @@ -4216,16 +3998,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" -[[package]] -name = "signal-hook" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e31d442c16f047a671b5a71e2161d6e68814012b7f5379d269ebd915fac2729" -dependencies = [ - "libc", - "signal-hook-registry", -] - [[package]] name = "signal-hook-registry" version = "1.4.1" @@ -4235,6 +4007,12 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" + [[package]] name = "similar" version = "2.2.1" @@ -4258,18 +4036,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "0.6.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = [ - "maybe-uninit", -] - -[[package]] -name = "smallvec" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" [[package]] name = "socket2" @@ -4298,8 +4067,8 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bdfb59103e43a0f99a346b57860d50f2138a7008d08acd964e9ac0fef3ae9a5" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -4318,12 +4087,6 @@ dependencies = [ "lock_api", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "static_assertions" version = "1.1.0" @@ -4342,12 +4105,6 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" -[[package]] -name = "strsim" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" - [[package]] name = "strsim" version = "0.10.0" @@ -4373,8 +4130,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -4401,19 +4158,19 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.15" +version = "2.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "45c3457aacde3c65315de5031ec191ce46604304d2446e803d71ade03308d970" dependencies = [ - 
"proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "unicode-ident", ] @@ -4429,8 +4186,8 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -4443,15 +4200,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.5.0" +version = "3.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" +checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" dependencies = [ "cfg-if 1.0.0", "fastrand", "redox_syscall 0.3.5", "rustix", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -4472,30 +4229,24 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "textwrap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" - [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] @@ -4510,22 +4261,13 @@ dependencies = [ [[package]] name = "time" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" +checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" dependencies = [ "itoa", + "libc", + "num_threads", "serde", "time-core", "time-macros", @@ -4539,9 +4281,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" dependencies = [ "time-core", ] @@ -4573,11 +4315,12 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.1" +version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" +checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ "autocfg", + "backtrace", 
"bytes", "libc", "mio", @@ -4607,28 +4350,18 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "tokio-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.1", + "rustls 0.21.5", "tokio", ] @@ -4696,9 +4429,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6135d499e69981f9ff0ef2167955a5333c35e36f6937d382974566b3d5b94ec" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" dependencies = [ "serde", "serde_spanned", @@ -4708,20 +4441,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.9" +version = "0.19.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d964908cec0d030b812013af25a0e57fddfadb1e066ecc6681d86253129d4f" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" dependencies = [ - "indexmap", + "indexmap 2.0.0", "serde", "serde_spanned", "toml_datetime", @@ -4736,7 +4469,7 @@ checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" dependencies = [ "async-trait", "axum", - "base64 0.21.0", + "base64 0.21.2", "bytes", "futures-core", "futures-util", @@ -4763,9 +4496,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease 0.1.25", - "proc-macro2 1.0.56", + "proc-macro2 1.0.66", "prost-build", - "quote 1.0.27", + "quote 1.0.31", "syn 1.0.109", ] @@ -4778,7 +4511,7 @@ dependencies = [ "futures-core", "futures-util", "hdrhistogram", - "indexmap", + "indexmap 1.9.3", "pin-project", "pin-project-lite", "rand 0.8.5", @@ -4791,8 +4524,8 @@ dependencies = [ ] [[package]] -name = "tower-batch" -version = "0.2.39" +name = "tower-batch-control" +version = "0.2.41-beta.4" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4816,7 +4549,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.39" +version = "0.2.41-beta.4" dependencies = [ "futures-core", "pin-project", @@ -4872,26 +4605,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" dependencies = [ "crossbeam-channel", - "time 0.3.21", - "tracing-subscriber 0.3.17", + "time", + "tracing-subscriber", ] [[package]] name = "tracing-attributes" -version = "0.1.24" +version = "0.1.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -4904,7 +4637,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" dependencies = [ "tracing", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -4915,7 +4648,7 @@ checksum = "0bae117ee14789185e129aaee5d93750abe67fdc5a9a62650452bfe4e122a3a9" dependencies = [ "lazy_static", "tracing", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -4936,7 +4669,7 @@ checksum = "ba316a74e8fc3c3896a850dba2375928a9fa171b085ecddfc7c054d39970f3fd" dependencies = [ "libc", "tracing-core", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] @@ -4950,35 +4683,18 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-subscriber" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "192ca16595cdd0661ce319e8eede9c975f227cdaabc4faaefdc256f43d852e45" -dependencies = [ - "ansi_term 0.11.0", - "chrono", - "lazy_static", - "matchers 0.0.1", - "owning_ref", - "regex", - "smallvec 0.6.14", - "tracing-core", - "tracing-log", -] - [[package]] name = "tracing-subscriber" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ - "matchers 0.1.0", + "matchers", "nu-ansi-term", "once_cell", "regex", "sharded-slab", - "smallvec 1.10.0", + "smallvec", "thread_local", "tracing", "tracing-core", @@ -4993,7 +4709,7 @@ checksum = "3a2c0ff408fe918a94c428a3f2ad04e4afd5c95bbc08fcf868eff750c15728a4" dependencies = [ "lazy_static", "tracing-core", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "tracing-test-macro", ] @@ -5004,7 +4720,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "258bc1c4f8e2e73a977812ab339d503e6feeb92700f6d07a6de4d321522d5c08" dependencies = [ "lazy_static", - "quote 1.0.27", + "quote 1.0.31", "syn 1.0.109", ] @@ -5022,9 +4738,9 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "uint" @@ -5070,9 +4786,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-normalization" @@ -5109,9 +4825,9 @@ 
checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ "crypto-common", "subtle", @@ -5125,24 +4841,24 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "ureq" -version = "2.6.2" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "338b31dd1314f68f3aabf3ed57ab922df95ffcd902476ca7ba3c4ce7b908c46d" +checksum = "0b11c96ac7ee530603dcdf68ed1557050f374ce55a5a07193ebf8cbc9f8927e9" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "log", "once_cell", - "rustls 0.20.8", + "rustls 0.21.5", + "rustls-webpki 0.100.1", "url", - "webpki", - "webpki-roots", + "webpki-roots 0.23.1", ] [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", "idna", @@ -5150,13 +4866,19 @@ dependencies = [ "serde", ] +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" +checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "serde", ] @@ -5180,15 +4902,15 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vergen" -version = "8.1.3" +version = "8.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e03272e388fb78fc79481a493424f78d77be1d55f21bcd314b5a6716e195afe" +checksum = "bbc5ad0d9d26b2c49a5ab7da76c3e79d3ee37e7821799f8223fcb8f2f391a2e7" dependencies = [ "anyhow", "git2", "rustc_version 0.4.0", "rustversion", - "time 0.3.21", + "time", ] [[package]] @@ -5224,11 +4946,10 @@ dependencies = [ [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -5238,12 +4959,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -5252,9 +4967,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.85" +version = "0.2.87" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b6cb788c4e39112fbe1822277ef6fb3c55cd86b95cb3d3c4c1c9597e4ac74b4" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -5262,24 +4977,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.85" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e522ed4105a9d626d885b35d62501b30d9666283a5c8be12c14a8bdafe7822" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.35" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "083abe15c5d88556b77bdf7aef403625be9e327ad37c62c4e4129af740168163" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -5289,38 +5004,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.85" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "358a79a0cb89d21db8120cbfb91392335913e4890665b1a7981d9e956903b434" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.27", + "quote 1.0.31", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.85" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4783ce29f09b9d93134d41297aded3a712b7b979e9c6f28c32cb88c973a94869" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.85" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a901d592cafaa4d711bc324edfaff879ac700b19c3dfd60058d2b445be2691eb" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" -version = "0.3.62" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b5f940c7edfdc6d12126d98c9ef4d1b3d470011c47c76a6581df47ad9ba721" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -5345,6 +5060,15 @@ dependencies = [ "webpki", ] +[[package]] +name = "webpki-roots" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" +dependencies = [ + "rustls-webpki 0.100.1", +] + [[package]] name = "which" version = "4.4.0" @@ -5393,22 +5117,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.0", -] - -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - 
"windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets 0.48.1", ] [[package]] @@ -5426,7 +5135,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", + "windows-targets 0.48.1", ] [[package]] @@ -5446,9 +5155,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ "windows_aarch64_gnullvm 0.48.0", "windows_aarch64_msvc 0.48.0", @@ -5545,9 +5254,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.4.6" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +checksum = "81fac9742fd1ad1bd9643b991319f72dd031016d44b77039a26977eb667141e7" dependencies = [ "memchr", ] @@ -5572,16 +5281,25 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0-rc.2" +version = "2.0.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95" +checksum = "ec7fae07da688e17059d5886712c933bb0520f15eff2e09cfa18e30968f4e63a" dependencies = [ - "curve25519-dalek 4.0.0-rc.2", + "curve25519-dalek", "rand_core 0.6.4", "serde", "zeroize", ] +[[package]] +name = "xdg" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688597db5a750e9cad4511cb94729a078e274308099a0382b5b8203bbc767fee" +dependencies = [ + "home", +] + [[package]] name = "yaml-rust" version = "0.4.5" @@ -5593,9 +5311,9 @@ dependencies = [ [[package]] name = "zcash_address" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52be35a205369d480378646bff9c9fedafd8efe8af1e0e54bb858f405883f2b2" +checksum = "8944af5c206cf2e37020ad54618e1825501b98548d35a638b73e0ec5762df8d5" dependencies = [ "bech32", "bs58", @@ -5626,9 +5344,9 @@ dependencies = [ [[package]] name = "zcash_note_encryption" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb2149e6cd5fbee36c5b87c601715a8c35554602f7fe84af38b636afa2db318" +checksum = "5b4580cd6cee12e44421dac43169be8d23791650816bdb34e6ddfa70ac89c1c5" dependencies = [ "chacha20", "chacha20poly1305", @@ -5639,9 +5357,9 @@ dependencies = [ [[package]] name = "zcash_primitives" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914d2195a478d5b63191584dff126f552751115181857b290211ec88e68acc3e" +checksum = "de1a231e6a58d3dcdd6e21d229db33d7c10f9b54d8c170e122b267f6826bb48f" dependencies = [ "aes", "bip0039", @@ -5666,7 +5384,7 @@ dependencies = [ "rand_core 0.6.4", "ripemd", "secp256k1", - "sha2 0.10.6", + "sha2", "subtle", "zcash_address", "zcash_encoding", @@ -5675,34 +5393,38 @@ dependencies = [ [[package]] name = "zcash_proofs" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e5c8147884952748b00aa443d36511ae2d7b49acfec74cfd39c0959fbb61ef14" +checksum = "59d2e066a717f28451a081f2ebd483ddda896cf00d572972c10979d645ffa6c4" dependencies = [ "bellman", "blake2b_simd", "bls12_381", - "directories", "group", + "home", + "incrementalmerkletree", "jubjub", + "known-folders", "lazy_static", "minreq", "rand_core 0.6.4", "redjubjub", "tracing", + "xdg", "zcash_primitives", ] [[package]] name = "zcash_script" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f5d794b254efc2759d249b477f53faa751f67543a4b4d1c7a5ff7df212d4ba5" +checksum = "8c4f95043fd34d402b8d5debb0e54a28c2b84fc99591f5973b4999e9c5b01bfd" dependencies = [ "bellman", "bindgen", "blake2b_simd", "bls12_381", + "bridgetree", "byteorder", "cc", "crossbeam-channel", @@ -5713,8 +5435,9 @@ dependencies = [ "jubjub", "libc", "memuse", - "metrics 0.20.1", + "metrics", "orchard", + "rand 0.8.5", "rand_core 0.6.4", "rayon", "subtle", @@ -5729,13 +5452,14 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.3.3", "bitflags-serde-legacy", "bitvec", "blake2b_simd", "blake2s_simd", + "bridgetree", "bs58", "byteorder", "chrono", @@ -5750,7 +5474,7 @@ dependencies = [ "hex", "humantime", "incrementalmerkletree", - "itertools", + "itertools 0.11.0", "jubjub", "lazy_static", "num-integer", @@ -5769,8 +5493,8 @@ dependencies = [ "serde", "serde-big-array", "serde_json", - "serde_with 3.0.0", - "sha2 0.9.9", + "serde_with 3.2.0", + "sha2", "spandoc", "static_assertions", "thiserror", @@ -5789,7 +5513,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" dependencies = [ "bellman", "blake2b_simd", @@ -5804,7 +5528,7 @@ dependencies = [ "howudoin", "jubjub", "lazy_static", - "metrics 0.21.0", + "metrics", "num-integer", "once_cell", "orchard", @@ -5818,12 +5542,12 @@ dependencies = [ "tinyvec", "tokio", "tower", - "tower-batch", + "tower-batch-control", "tower-fallback", "tracing", "tracing-error", "tracing-futures", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "zcash_proofs", "zebra-chain", "zebra-node-services", @@ -5834,19 +5558,22 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.3.3", "byteorder", "bytes", "chrono", + "dirs", "futures", "hex", "howudoin", "humantime-serde", - "indexmap", + "indexmap 2.0.0", + "itertools 0.11.0", "lazy_static", - "metrics 0.21.0", + "metrics", + "num-integer", "ordered-map", "pin-project", "proptest", @@ -5856,11 +5583,12 @@ dependencies = [ "regex", "serde", "static_assertions", + "tempfile", "thiserror", "tokio", "tokio-stream", "tokio-util 0.7.8", - "toml 0.7.4", + "toml 0.7.6", "tower", "tracing", "tracing-error", @@ -5871,7 +5599,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" dependencies = [ "color-eyre", "jsonrpc-core", @@ -5883,13 +5611,13 @@ dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" dependencies = [ "chrono", "futures", "hex", "hyper", - "indexmap", + "indexmap 2.0.0", "insta", "jsonrpc-core", "jsonrpc-derive", @@ -5915,7 +5643,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" dependencies = [ "displaydoc", "hex", @@ -5928,7 +5656,7 @@ dependencies = [ 
[[package]] name = "zebra-state" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" dependencies = [ "bincode", "chrono", @@ -5939,20 +5667,22 @@ dependencies = [ "halo2_proofs", "hex", "howudoin", - "indexmap", + "indexmap 2.0.0", "insta", - "itertools", + "itertools 0.11.0", "jubjub", "lazy_static", - "metrics 0.21.0", + "metrics", "mset", "once_cell", "proptest", "proptest-derive", + "rand 0.8.5", "rayon", "regex", "rlimit", "rocksdb", + "semver 1.0.18", "serde", "serde_json", "spandoc", @@ -5968,13 +5698,13 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" dependencies = [ "color-eyre", "futures", "hex", "humantime", - "indexmap", + "indexmap 2.0.0", "insta", "lazy_static", "once_cell", @@ -5990,16 +5720,16 @@ dependencies = [ "tower", "tracing", "tracing-error", - "tracing-subscriber 0.3.17", + "tracing-subscriber", ] [[package]] name = "zebra-utils" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" dependencies = [ "color-eyre", "hex", - "itertools", + "itertools 0.11.0", "regex", "reqwest", "serde_json", @@ -6008,7 +5738,7 @@ dependencies = [ "tinyvec", "tokio", "tracing-error", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "zebra-chain", "zebra-node-services", "zebra-rpc", @@ -6016,27 +5746,27 @@ dependencies = [ [[package]] name = "zebrad" -version = "1.0.0-rc.8" +version = "1.1.0" dependencies = [ "abscissa_core", "atty", "chrono", + "clap 4.3.21", "color-eyre", "console-subscriber", "dirs", "futures", - "gumdrop", "hex", "howudoin", "humantime-serde", "hyper", - "indexmap", + "indexmap 2.0.0", "indicatif", "inferno", "jsonrpc-core", "lazy_static", "log", - "metrics 0.21.0", + "metrics", "metrics-exporter-prometheus", "num-integer", "once_cell", @@ -6047,7 +5777,7 @@ dependencies = [ "rand 0.8.5", "rayon", "regex", - "semver 1.0.17", + "semver 1.0.18", "sentry", "serde", "serde_json", @@ -6056,7 +5786,7 @@ dependencies = [ "tinyvec", "tokio", "tokio-stream", - "toml 0.7.4", + "toml 0.7.6", "tonic", "tonic-build", "tower", @@ -6066,7 +5796,7 @@ dependencies = [ "tracing-flame", "tracing-futures", "tracing-journald", - "tracing-subscriber 0.3.17", + "tracing-subscriber", "tracing-test", "vergen", "zebra-chain", @@ -6094,7 +5824,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.56", - "quote 1.0.27", - "syn 2.0.15", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", ] diff --git a/Cargo.toml b/Cargo.toml index 48ded2707ac..0f81f34fa45 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,10 +10,15 @@ members = [ "zebra-node-services", "zebra-test", "zebra-utils", - "tower-batch", + "tower-batch-control", "tower-fallback", ] +# Use the edition 2021 dependency resolver in the workspace, to match the crates +resolver = "2" + +# Compilation settings + [profile.dev] panic = "abort" diff --git a/README.md b/README.md index 362f19c8aff..e83171a7443 100644 --- a/README.md +++ b/README.md @@ -7,79 +7,58 @@ ## Contents -- [Contents](#contents) - [About](#about) - - [Using Zebra](#using-zebra) -- [Release Candidates](#release-candidates) - [Getting Started](#getting-started) - [Docker](#docker) - [Building Zebra](#building-zebra) - [Optional Features](#optional-features) - - [Configuring JSON-RPC for lightwalletd](#configuring-json-rpc-for-lightwalletd) - [Network Ports](#network-ports) - [Known Issues](#known-issues) - [Future Work](#future-work) - 
[Documentation](#documentation) +- [User support](#user-support) - [Security](#security) - [License](#license) ## About [Zebra](https://zebra.zfnd.org/) is the Zcash Foundation's independent, -consensus-compatible implementation of a Zcash node, currently under -development. It can be used to join the Zcash peer-to-peer network, which helps -keeping Zcash working by validating and broadcasting transactions, and maintaining -the Zcash blockchain state in a distributed manner. +consensus-compatible implementation of a Zcash node. -[Zcash](https://doc.zebra.zfnd.org/zebrad/index.html#about-zcash) -is a cryptocurrency designed to preserve the user's privacy. -If you just want to send and receive Zcash then you don't need to use Zebra -directly. You can download a Zcash wallet application which will handle that -for you. +Zebra's network stack is interoperable with `zcashd`, and Zebra implements all +the features required to reach Zcash network consensus, including the validation +of all the consensus rules for the NU5 network upgrade. +[Here](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-advantages) are some +benefits of Zebra. -Please [join us on Discord](https://discord.gg/na6QZNd) if you'd -like to find out more or get involved! +Zebra validates blocks and transactions, but needs extra software to generate +them: -### Using Zebra +- To generate transactions, [run Zebra with + `lightwalletd`](https://zebra.zfnd.org/user/lightwalletd.html). +- To generate blocks, [enable mining + support](https://zebra.zfnd.org/user/mining.html), and use a mining pool or + miner with Zebra's mining JSON-RPCs. Mining support is currently incomplete, + experimental, and off by default. -You would want to run Zebra if you want to contribute to the -Zcash network: the more nodes are run, the more reliable the network will be -in terms of speed and resistance to denial of service attacks, for example. - -Zebra aims to be [faster, more secure, and more easily extensible](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-advantages) -than other Zcash implementations. - -## Release Candidates - -Every few weeks, we release a [new Zebra version](https://github.com/ZcashFoundation/zebra/releases). - -Zebra's network stack is interoperable with `zcashd`, -and Zebra implements all the features required to reach Zcash network consensus. -Currently, Zebra validates all of the Zcash consensus rules for the NU5 network upgrade. - -Zebra validates blocks and transactions, but needs extra software to generate them: - -- to generate transactions, [configure `zebrad`'s JSON-RPC port](https://github.com/ZcashFoundation/zebra#configuring-json-rpc-for-lightwalletd), - and use a light wallet with `lightwalletd` and Zebra. -- to generate blocks, [compile `zebrad` with the `getblocktemplate-rpcs` feature](https://doc.zebra.zfnd.org/zebrad/#json-rpc), configure the JSON-RPC port, - and use a mining pool or miner with Zebra's mining JSON-RPCs. - Mining support is currently incomplete, experimental, and off by default. +Please [join us on Discord](https://discord.gg/na6QZNd) if you'd like to find +out more or get involved! ## Getting Started You can run Zebra using our Docker image or you can build it manually. Please -see the [requirements section of the Zebra Book](https://zebra.zfnd.org/user/requirements.html) for system -requirements. +see the [System Requirements](https://zebra.zfnd.org/user/requirements.html) +section in the Zebra book for system requirements. 
### Docker This command will run our latest release, and sync it to the tip: ```sh -docker run zfnd/zebra:1.0.0-rc.8 +docker run zfnd/zebra:latest ``` -For more information, read our [Docker documentation](book/src/user/docker.md). +For more information, read our [Docker documentation](https://zebra.zfnd.org/user/docker.html). ### Building Zebra @@ -88,12 +67,16 @@ Building Zebra requires [Rust](https://www.rust-lang.org/tools/install), [pkg-config](http://pkgconf.org/), and a C++ compiler. Zebra is tested with the latest `stable` Rust version. Earlier versions are not -supported or tested. Note that Zebra's code currently uses features introduced -in Rust 1.68, or any later stable release. +supported or tested. Any Zebra release can start depending on new features in the +latest stable Rust. + +Every few weeks, we release a [new Zebra version](https://github.com/ZcashFoundation/zebra/releases). Below are quick summaries for installing the dependencies on your machine. -

General instructions for installing dependencies

+
+ +

General instructions for installing dependencies

1. Install [`cargo` and `rustc`](https://www.rust-lang.org/tools/install). @@ -107,7 +90,9 @@ Below are quick summaries for installing the dependencies on your machine.
-

Dependencies on Arch

+
+ +

Dependencies on Arch

```sh sudo pacman -S rust clang pkgconf @@ -117,10 +102,10 @@ Note that the package `clang` includes `libclang` as well as the C++ compiler.
-Once the dependencies are in place, you can build Zebra
+Once the dependencies are in place, you can build and install Zebra:

```sh
-cargo install --locked --git https://github.com/ZcashFoundation/zebra --tag v1.0.0-rc.8 zebrad
+cargo install --locked zebrad
```

You can start Zebra by
@@ -129,12 +114,13 @@ You can start Zebra by
zebrad start
```

-See the [Running Zebra](https://zebra.zfnd.org/user/run.html) section in the
-book for more details.
+See the [Installing Zebra](https://zebra.zfnd.org/user/install.html) and [Running Zebra](https://zebra.zfnd.org/user/run.html)
+sections in the book for more details.

#### Optional Features

You can also build Zebra with additional [Cargo features](https://doc.rust-lang.org/cargo/reference/features.html#command-line-feature-options):
+
- `sentry` for [Sentry monitoring](https://zebra.zfnd.org/user/requirements.html#sentry-production-monitoring)
- `journald` for [`journald` logging](https://zebra.zfnd.org/user/tracing.html#journald-logging)
- `prometheus` for [Prometheus metrics](https://doc.zebra.zfnd.org/zebrad/#metrics)
@@ -152,27 +138,6 @@ documentation](https://doc.zebra.zfnd.org/zebrad/index.html#zebra-feature-flags)
Some debugging and monitoring features are disabled in release builds to
increase performance.

-### Configuring JSON-RPC for lightwalletd
-
-To use `zebrad` as a `lightwalletd` backend, give it this `~/.config/zebrad.toml`:
-
-```toml
-[rpc]
-# listen for RPC queries on localhost
-listen_addr = '127.0.0.1:8232'
-
-# automatically use multiple CPU threads
-parallel_cpu_threads = 0
-```
-
-**WARNING:** This config allows multiple Zebra instances to share the same RPC port.
-See the [RPC config documentation](https://doc.zebra.zfnd.org/zebra_rpc/config/struct.Config.html) for details.
-
-`lightwalletd` also requires a `zcash.conf` file.
-
-It is recommended to use [adityapk00/lightwalletd](https://github.com/adityapk00/lightwalletd) because that is used in testing.
-Other `lightwalletd` forks have limited support, see the [detailed `lightwalletd` instructions](https://github.com/ZcashFoundation/zebra/blob/main/book/src/user/lightwalletd.md#sync-lightwalletd).
-
### Network Ports

Zebra uses the following inbound and outbound TCP ports:
@@ -188,20 +153,21 @@ section of the Zebra book for more details.

There are a few bugs in Zebra that we're still working on fixing:

+- Zebra currently gossips and connects to [private IP addresses](https://en.wikipedia.org/wiki/IP_address#Private_addresses); we want to [disable private IPs but provide a config (#3117)](https://github.com/ZcashFoundation/zebra/issues/3117) in an upcoming release.
+
- If Zebra fails downloading the Zcash parameters, use [the Zcash parameters download script](https://github.com/zcash/zcash/blob/master/zcutil/fetch-params.sh) instead.

- Block download and verification sometimes times out during Zebra's initial sync [#5709](https://github.com/ZcashFoundation/zebra/issues/5709). The full sync still finishes reasonably quickly.

+- Rust 1.70 [causes crashes during shutdown on macOS x86_64 (#6812)](https://github.com/ZcashFoundation/zebra/issues/6812). The state cache should stay valid despite the crash.
+
- No Windows support [#3801](https://github.com/ZcashFoundation/zebra/issues/3801). We used to test with Windows Server 2019, but not any more; see the issue for details.

- Experimental Tor support is disabled until [Zebra upgrades to the latest `arti-client`](https://github.com/ZcashFoundation/zebra/issues/5492).
This happened due to a Rust dependency conflict, which could only be resolved by `arti` upgrading to a version of `x25519-dalek` with the dependency fix.
-- Output of `help`, `--help` flag, and usage of invalid commands or options are inconsistent [#5502](https://github.com/ZcashFoundation/zebra/issues/5502). See the issue for details.

## Future Work

-The Zebra team is currently working towards an audited stable release.
-
We will continue to add new features as part of future network upgrades, and in response to community feedback.

## Documentation
@@ -213,6 +179,12 @@ documentation](https://doc.zebra.zfnd.org) for the external API of our crates,
as well as [internal documentation](https://doc-internal.zebra.zfnd.org) for
private APIs.

+## User support
+
+For bug reports, please [open a bug report ticket in the Zebra repository](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=%5BUser+reported+bug%5D%3A+).
+
+Alternatively, for support via chat, [join the Zcash Foundation Discord Server](https://discord.com/invite/aRgNRVwsM8) and find the #zebra-support channel.
+
## Security

Zebra has a [responsible disclosure policy](https://github.com/ZcashFoundation/zebra/blob/main/SECURITY.md), which we encourage security researchers to follow.
@@ -223,3 +195,7 @@ Zebra is distributed under the terms of both the MIT license and the Apache
License (Version 2.0).

See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT).
+
+Some Zebra crates are distributed under the [MIT license only](LICENSE-MIT),
+because some of their code was originally from MIT-licensed projects.
+See each crate's directory for details.
diff --git a/book/firebase.json b/book/firebase.json
deleted file mode 100644
index 6fe0e74e2c3..00000000000
--- a/book/firebase.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hosting": {
- "public": "book",
- "ignore": [
- "firebase.json",
- "**/.*",
- "**/node_modules/**"
- ],
- "rewrites": [
- {
- "source": "**",
- "destination": "/index.html"
- }
- ]
- }
-}
\ No newline at end of file
diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md
index a98c39c5e97..aa49967efcc 100644
--- a/book/src/SUMMARY.md
+++ b/book/src/SUMMARY.md
@@ -15,11 +15,20 @@
- [zk-SNARK Parameters](user/parameters.md)
- [Mining](user/mining.md)
- [Testnet Mining with s-nomp](user/mining-testnet-s-nomp.md)
+ - [Mining with Zebra in Docker](user/mining-docker.md)
- [Kibana blockchain explorer](user/elasticsearch.md)
+ - [Troubleshooting](user/troubleshooting.md)
- [Developer Documentation](dev.md)
- [Contribution Guide](CONTRIBUTING.md)
- [Design Overview](dev/overview.md)
+ - [Diagrams](dev/diagrams.md)
+ - [Network Architecture](dev/diagrams/zebra-network.md)
+ - [Upgrading the State Database](dev/state-db-upgrades.md)
- [Zebra versioning and releases](dev/release-process.md)
+ - [Continuous Integration](dev/continuous-integration.md)
+ - [Continuous Delivery](dev/continuous-delivery.md)
+ - [Generating Zebra Checkpoints](dev/zebra-checkpoints.md)
+ - [Doing Mass Renames](dev/mass-renames.md)
- [Zebra RFCs](dev/rfcs.md)
- [Pipelinable Block Lookup](dev/rfcs/0001-pipelinable-block-lookup.md)
- [Parallel Verification](dev/rfcs/0002-parallel-verification.md)
@@ -31,9 +40,4 @@
- [V5 Transaction](dev/rfcs/0010-v5-transaction.md)
- [Async Rust in Zebra](dev/rfcs/0011-async-rust-in-zebra.md)
- [Value Pools](dev/rfcs/0012-value-pools.md)
- - [Diagrams](dev/diagrams.md)
- - [Network Architecture](dev/diagrams/zebra-network.md)
- - [Continuous 
Integration](dev/continuous-integration.md) - - [Continuous Delivery](dev/continuous-delivery.md) - - [zebra-checkpoints](dev/zebra-checkpoints.md) - [API Reference](api.md) diff --git a/book/src/dev/diagrams/service-dependencies.svg b/book/src/dev/diagrams/service-dependencies.svg index 3ebd62bdf56..d6d02928dd5 100644 --- a/book/src/dev/diagrams/service-dependencies.svg +++ b/book/src/dev/diagrams/service-dependencies.svg @@ -74,15 +74,15 @@ - + -chain_verifier +block_verifier_router -chain_verifier +block_verifier_router - + -inbound->chain_verifier +inbound->block_verifier_router @@ -104,9 +104,9 @@ - + -rpc_server->chain_verifier +rpc_server->block_verifier_router @@ -116,9 +116,9 @@ checkpoint_verifier - + -chain_verifier->checkpoint_verifier +block_verifier_router->checkpoint_verifier @@ -128,9 +128,9 @@ block_verifier - + -chain_verifier->block_verifier +block_verifier_router->block_verifier @@ -146,9 +146,9 @@ syncer - + -syncer->chain_verifier +syncer->block_verifier_router diff --git a/book/src/dev/mass-renames.md b/book/src/dev/mass-renames.md new file mode 100644 index 00000000000..9240d873e7f --- /dev/null +++ b/book/src/dev/mass-renames.md @@ -0,0 +1,116 @@ +# Doing Mass Renames in Zebra Code + +Sometimes we want to rename a Rust type or function, or change a log message. + +But our types and functions are also used in our documentation, +so the compiler can sometimes miss when their names are changed. + +Our log messages are also used in our integration tests, +so changing them can lead to unexpected test failures or hangs. + +## Universal Renames with `sed` + +You can use `sed` to rename all the instances of a name in Zebra's code, documentation, and tests: +```sh +git ls-tree --full-tree -r --name-only HEAD | \ +xargs sed -i -e 's/OldName/NewName/g' -e 's/OtherOldName/OtherNewName/g' +``` + +Or excluding specific paths: +```sh +git ls-tree --full-tree -r --name-only HEAD | \ +grep -v -e 'path-to-skip' -e 'other-path-to-skip' | \ +xargs sed -i -e 's/OldName/NewName/g' -e 's/OtherOldName/OtherNewName/g' +``` + +`sed` also supports regular expressions to replace a pattern with another pattern. + +Here's how to make a PR with these replacements: +1. Run the `sed` commands +2. Run `cargo fmt --all` after doing all the replacements +3. Put the commands in the commit message and pull request, so the reviewer can check them + +Here's how to review that PR: +1. Check out two copies of the repository, one with the PR, and one without: +```sh +cd zebra +git fetch --all +# clear the checkout so we can use main elsewhere +git checkout main^ +# Use the base branch or commit for the PR, which is usually main +git worktree add ../zebra-sed main +git worktree add ../zebra-pr origin/pr-branch-name +``` + +2. Run the scripts on the repository without the PR: +```sh +cd ../zebra-sed +# run the scripts in the PR or commit message +git ls-tree --full-tree -r --name-only HEAD | \ +grep -v -e 'path-to-skip' -e 'other-path-to-skip' | \ +xargs sed -i -e 's/OldName/NewName/g' -e 's/OtherOldName/OtherNewName/g' +cargo fmt --all +``` + +3. Automatically check that they match +```sh +cd .. +git diff zebra-sed zebra-pr +``` + +If there are no differences, then the PR can be approved. + +If there are differences, then post them as a review in the PR, +and ask the author to re-run the script on the latest `main`. 
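+
+Since `sed` supports regular expressions (as noted above), a whole family of names
+can be renamed in one pass. A minimal hypothetical sketch (the `OldPrefix` and
+`NewPrefix` names are made up for illustration):
+
+```sh
+# Rename OldPrefixFoo, OldPrefixBar, ... to NewPrefixFoo, NewPrefixBar, ...
+git ls-tree --full-tree -r --name-only HEAD | \
+xargs sed -i -E 's/OldPrefix([A-Za-z0-9_]+)/NewPrefix\1/g'
+```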
+
+## Interactive Renames with `fastmod`
+
+You can use `fastmod` to rename some instances, but skip others:
+```sh
+fastmod --hidden --fixed-strings "OldName" "NewName" [paths to change]
+```
+
+Using the `--hidden` flag does renames in `.github` workflows, issue templates, and other configs.
+
+`fastmod` also supports regular expressions to replace a pattern with another pattern.
+
+Here's how to make a PR with these replacements:
+1. Run the `fastmod` commands, choosing which instances to replace
+2. Run `cargo fmt --all` after doing all the replacements
+3. Put the commands in the commit message and pull request, so the reviewer can check them
+4. If there are a lot of renames:
+   - use `sed` on any directories or files that are always renamed, and put them in the first PR,
+   - do a cleanup using `fastmod` in the next PR.
+
+Here's how to review that PR:
+1. Manually review each replacement (there's no shortcut)
+
+## Using `rustdoc` links to detect name changes
+
+When you're referencing a type or function in a doc comment,
+use a `rustdoc` link to refer to it.
+
+This makes the documentation easier to navigate,
+and our `rustdoc` lint will detect any typos or name changes.
+
+```rust
+//! This is what `rustdoc` links look like:
+//! - [`u32`] type or trait
+//! - [`drop()`] function
+//! - [`Clone::clone()`] method
+//! - [`Option::None`] enum variant
+//! - [`Option::Some(_)`](Option::Some) enum variant with data
+//! - [`HashMap`](std::collections::HashMap) fully-qualified path
+//! - [`BTreeSet`](std::collections::BTreeSet) fully-qualified path with generics
+```
+
+If a type isn't imported in the module or Rust prelude,
+then it needs a fully-qualified path in the docs, or an unused import:
+```rust
+//! Link to [`LinkedList`].
+
+// For rustdoc: inner `//!` doc comments must come before any items,
+// so this unused import goes after the module docs.
+#[allow(unused_imports)]
+use std::collections::LinkedList;
+
+struct Type;
+```
diff --git a/book/src/dev/overview.md b/book/src/dev/overview.md
index 7117f936081..2142be8b0fc 100644
--- a/book/src/dev/overview.md
+++ b/book/src/dev/overview.md
@@ -56,18 +56,18 @@ digraph services {
    inbound -> state
    rpc_server -> state
    mempool -> transaction_verifier
-    chain_verifier -> checkpoint_verifier
+    block_verifier_router -> checkpoint_verifier
    inbound -> mempool
    rpc_server -> mempool
-    inbound -> chain_verifier
-    syncer -> chain_verifier
-    rpc_server -> chain_verifier [style=dotted]
+    inbound -> block_verifier_router
+    syncer -> block_verifier_router
+    rpc_server -> block_verifier_router [style=dotted]
    syncer -> peer_set
    mempool -> peer_set
    block_verifier -> state
    checkpoint_verifier -> state
    block_verifier -> transaction_verifier
-    chain_verifier -> block_verifier
+    block_verifier_router -> block_verifier
    rpc_server -> inbound [style=invis] // for layout of the diagram
}
@@ -123,7 +123,7 @@ into several components:
  of blocks and transactions: all consensus rules that can be
  checked independently of the chain state, such as verification of signatures,
  proofs, and scripts. Internally, the library
-  uses [`tower-batch`](https://doc.zebra.zfnd.org/tower_batch/index.html) to
+  uses [`tower-batch-control`](https://doc.zebra.zfnd.org/tower_batch_control/index.html) to
  perform automatic, transparent batch processing of contemporaneous
  verification requests.
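+
+As a rough illustration of the batching idea (a toy sketch only, not the actual
+`tower-batch-control` API; the real implementation is a `tower::Service` that
+also flushes batches on a latency timer):
+
+```rust
+/// Toy batcher: collects verification requests so that expensive
+/// cryptographic checks can be run together, amortizing per-item cost.
+struct BatchCollector<T> {
+    pending: Vec<T>,
+    max_items: usize,
+}
+
+impl<T> BatchCollector<T> {
+    fn new(max_items: usize) -> Self {
+        Self { pending: Vec::new(), max_items }
+    }
+
+    /// Queue a request; returns the whole batch once it is full.
+    fn push(&mut self, item: T) -> Option<Vec<T>> {
+        self.pending.push(item);
+        if self.pending.len() >= self.max_items {
+            // Flush the entire batch at once, leaving an empty queue behind.
+            Some(std::mem::take(&mut self.pending))
+        } else {
+            None
+        }
+    }
+}
+```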
diff --git a/book/src/dev/rfcs/0005-state-updates.md b/book/src/dev/rfcs/0005-state-updates.md index e47245ad175..7767975fd11 100644 --- a/book/src/dev/rfcs/0005-state-updates.md +++ b/book/src/dev/rfcs/0005-state-updates.md @@ -663,305 +663,7 @@ New `non-finalized` blocks are committed as follows: ## rocksdb data structures [rocksdb]: #rocksdb -rocksdb provides a persistent, thread-safe `BTreeMap<&[u8], &[u8]>`. Each map is -a distinct "tree". Keys are sorted using lex order on byte strings, so -integer values should be stored using big-endian encoding (so that the lex -order on byte strings is the numeric ordering). - -Note that the lex order storage allows creating 1-to-many maps using keys only. -For example, the `tx_loc_by_transparent_addr_loc` allows mapping each address -to all transactions related to it, by simply storing each transaction prefixed -with the address as the key, leaving the value empty. Since rocksdb allows -listing all keys with a given prefix, it will allow listing all transactions -related to a given address. - -We use the following rocksdb column families: - -| Column Family | Keys | Values | Changes | -| ---------------------------------- | ---------------------- | ----------------------------- | ------- | -| *Blocks* | | | | -| `hash_by_height` | `block::Height` | `block::Hash` | Create | -| `height_by_hash` | `block::Hash` | `block::Height` | Create | -| `block_header_by_height` | `block::Height` | `block::Header` | Create | -| *Transactions* | | | | -| `tx_by_loc` | `TransactionLocation` | `Transaction` | Create | -| `hash_by_tx_loc` | `TransactionLocation` | `transaction::Hash` | Create | -| `tx_loc_by_hash` | `transaction::Hash` | `TransactionLocation` | Create | -| *Transparent* | | | | -| `balance_by_transparent_addr` | `transparent::Address` | `Amount \|\| AddressLocation` | Update | -| `tx_loc_by_transparent_addr_loc` | `AddressTransaction` | `()` | Create | -| `utxo_by_out_loc` | `OutputLocation` | `transparent::Output` | Delete | -| `utxo_loc_by_transparent_addr_loc` | `AddressUnspentOutput` | `()` | Delete | -| *Sprout* | | | | -| `sprout_nullifiers` | `sprout::Nullifier` | `()` | Create | -| `sprout_anchors` | `sprout::tree::Root` | `sprout::NoteCommitmentTree` | Create | -| `sprout_note_commitment_tree` | `block::Height` | `sprout::NoteCommitmentTree` | Delete | -| *Sapling* | | | | -| `sapling_nullifiers` | `sapling::Nullifier` | `()` | Create | -| `sapling_anchors` | `sapling::tree::Root` | `()` | Create | -| `sapling_note_commitment_tree` | `block::Height` | `sapling::NoteCommitmentTree` | Create | -| *Orchard* | | | | -| `orchard_nullifiers` | `orchard::Nullifier` | `()` | Create | -| `orchard_anchors` | `orchard::tree::Root` | `()` | Create | -| `orchard_note_commitment_tree` | `block::Height` | `orchard::NoteCommitmentTree` | Create | -| *Chain* | | | | -| `history_tree` | `block::Height` | `NonEmptyHistoryTree` | Delete | -| `tip_chain_value_pool` | `()` | `ValueBalance` | Update | - -Zcash structures are encoded using `ZcashSerialize`/`ZcashDeserialize`. -Other structures are encoded using `IntoDisk`/`FromDisk`. 
- -Block and Transaction Data: -- `Height`: 24 bits, big-endian, unsigned (allows for ~30 years worth of blocks) -- `TransactionIndex`: 16 bits, big-endian, unsigned (max ~23,000 transactions in the 2 MB block limit) -- `TransactionCount`: same as `TransactionIndex` -- `TransactionLocation`: `Height \|\| TransactionIndex` -- `OutputIndex`: 24 bits, big-endian, unsigned (max ~223,000 transfers in the 2 MB block limit) -- transparent and shielded input indexes, and shielded output indexes: 16 bits, big-endian, unsigned (max ~49,000 transfers in the 2 MB block limit) -- `OutputLocation`: `TransactionLocation \|\| OutputIndex` -- `AddressLocation`: the first `OutputLocation` used by a `transparent::Address`. - Always has the same value for each address, even if the first output is spent. -- `Utxo`: `Output`, derives extra fields from the `OutputLocation` key -- `AddressUnspentOutput`: `AddressLocation \|\| OutputLocation`, - used instead of a `BTreeSet` value, to improve database performance -- `AddressTransaction`: `AddressLocation \|\| TransactionLocation` - used instead of a `BTreeSet` value, to improve database performance - -We use big-endian encoding for keys, to allow database index prefix searches. - -Amounts: -- `Amount`: 64 bits, little-endian, signed -- `ValueBalance`: `[Amount; 4]` - -Derived Formats: -- `*::NoteCommitmentTree`: `bincode` using `serde` -- `NonEmptyHistoryTree`: `bincode` using `serde`, using `zcash_history`'s `serde` implementation - - -The following figure helps visualizing the address index, which is the most complicated part. -Numbers in brackets are array sizes; bold arrows are compositions (i.e. `TransactionLocation` is the -concatenation of `Height` and `TransactionIndex`); dashed arrows are compositions that are also 1-to-many -maps (i.e. `AddressTransaction` is the concatenation of `AddressLocation` and `TransactionLocation`, -but also is used to map each `AddressLocation` to multiple `TransactionLocation`s). - -```mermaid -graph TD; - Address -->|"balance_by_transparent_addr
"| AddressBalance; - AddressBalance ==> Amount; - AddressBalance ==> AddressLocation; - AddressLocation ==> FirstOutputLocation; - AddressLocation -.->|"tx_loc_by_transparent_addr_loc
(AddressTransaction[13])"| TransactionLocation; - TransactionLocation ==> Height; - TransactionLocation ==> TransactionIndex; - OutputLocation -->|utxo_by_out_loc| Output; - OutputLocation ==> TransactionLocation; - OutputLocation ==> OutputIndex; - AddressLocation -.->|"utxo_loc_by_transparent_addr_loc
(AddressUnspentOutput[16])"| OutputLocation; - - AddressBalance["AddressBalance[16]"]; - Amount["Amount[8]"]; - Height["Height[3]"]; - Address["Address[21]"]; - TransactionIndex["TransactionIndex[2]"]; - TransactionLocation["TransactionLocation[5]"]; - OutputIndex["OutputIndex[3]"]; - OutputLocation["OutputLocation[8]"]; - FirstOutputLocation["First OutputLocation[8]"]; - AddressLocation["AddressLocation[8]"]; -``` - -### Implementing consensus rules using rocksdb -[rocksdb-consensus-rules]: #rocksdb-consensus-rules - -Each column family handles updates differently, based on its specific consensus rules: -- Create: - - Each key-value entry is created once. - - Keys are never deleted, values are never updated. -- Delete: - - Each key-value entry is created once. - - Keys can be deleted, but values are never updated. - - Code called by ReadStateService must ignore deleted keys, or use a read lock. - - TODO: should we prevent re-inserts of keys that have been deleted? -- Update: - - Each key-value entry is created once. - - Keys are never deleted, but values can be updated. - - Code called by ReadStateService must handle old or new values, or use a read lock. - -We can't do some kinds of value updates, because they cause RocksDB performance issues: -- Append: - - Keys are never deleted. - - Existing values are never updated. - - Sets of values have additional items appended to the end of the set. - - Code called by ReadStateService must handle shorter or longer sets, or use a read lock. -- Up/Del: - - Keys can be deleted. - - Sets of values have items added or deleted (in any position). - - Code called by ReadStateService must ignore deleted keys and values, - accept shorter or longer sets, and accept old or new values. - Or it should use a read lock. - -Avoid using large sets of values as RocksDB keys or values. - -### RocksDB read locks -[rocksdb-read-locks]: #rocksdb-read-locks - -The read-only ReadStateService needs to handle concurrent writes and deletes of the finalized -column families it reads. It must also handle overlaps between the cached non-finalized `Chain`, -and the current finalized state database. - -The StateService uses RocksDB transactions for each block write. -So ReadStateService queries that only access a single key or value will always see -a consistent view of the database. - -If a ReadStateService query only uses column families that have keys and values appended -(`Never` in the Updates table above), it should ignore extra appended values. -Most queries do this by default. - -For more complex queries, there are several options: - -Reading across multiple column families: -1. Ignore deleted values using custom Rust code -2. Take a database snapshot - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.snapshot - -Reading a single column family: -3. multi_get - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.multi_get_cf -4. iterator - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.iterator_cf - -RocksDB also has read transactions, but they don't seem to be exposed in the Rust crate. - -### Low-Level Implementation Details -[rocksdb-low-level]: #rocksdb-low-level - -RocksDB ignores duplicate puts and deletes, preserving the latest values. -If rejecting duplicate puts or deletes is consensus-critical, -check [`db.get_cf(cf, key)?`](https://docs.rs/rocksdb/0.16.0/rocksdb/struct.DBWithThreadMode.html#method.get_cf) -before putting or deleting any values in a batch. 
- -Currently, these restrictions should be enforced by code review: -- multiple `zs_insert`s are only allowed on Update column families, and -- [`delete_cf`](https://docs.rs/rocksdb/0.16.0/rocksdb/struct.WriteBatch.html#method.delete_cf) - is only allowed on Delete column families. - -In future, we could enforce these restrictions by: -- creating traits for Never, Delete, and Update -- doing different checks in `zs_insert` depending on the trait -- wrapping `delete_cf` in a trait, and only implementing that trait for types that use Delete column families. - -As of June 2021, the Rust `rocksdb` crate [ignores the delete callback](https://docs.rs/rocksdb/0.16.0/src/rocksdb/merge_operator.rs.html#83-94), -and merge operators are unreliable (or have undocumented behaviour). -So they should not be used for consensus-critical checks. - -### Notes on rocksdb column families -[rocksdb-column-families]: #rocksdb-column-families - -- The `hash_by_height` and `height_by_hash` column families provide a bijection between - block heights and block hashes. (Since the rocksdb state only stores finalized - state, they are actually a bijection). - -- Similarly, the `tx_loc_by_hash` and `hash_by_tx_loc` column families provide a bijection between - transaction locations and transaction hashes. - -- The `block_header_by_height` column family provides a bijection between block - heights and block header data. There is no corresponding `height_by_block` column - family: instead, hash the block header, and use the hash from `height_by_hash`. - (Since the rocksdb state only stores finalized state, they are actually a bijection). - Similarly, there are no column families that go from transaction data - to transaction locations: hash the transaction and use `tx_loc_by_hash`. - -- Block headers and transactions are stored separately in the database, - so that individual transactions can be accessed efficiently. - Blocks can be re-created on request using the following process: - - Look up `height` in `height_by_hash` - - Get the block header for `height` from `block_header_by_height` - - Iterate from `TransactionIndex` 0, - to get each transaction with `height` from `tx_by_loc`, - stopping when there are no more transactions in the block - -- Block headers are stored by height, not by hash. This has the downside that looking - up a block by hash requires an extra level of indirection. The upside is - that blocks with adjacent heights are adjacent in the database, and many - common access patterns, such as helping a client sync the chain or doing - analysis, access blocks in (potentially sparse) height order. In addition, - the fact that we commit blocks in order means we're writing only to the end - of the rocksdb column family, which may help save space. - -- Similarly, transaction data is stored in chain order in `tx_by_loc` and `utxo_by_out_loc`, - and chain order within each vector in `utxo_loc_by_transparent_addr_loc` and - `tx_loc_by_transparent_addr_loc`. - -- `TransactionLocation`s are stored as a `(height, index)` pair referencing the - height of the transaction's parent block and the transaction's index in that - block. This would more traditionally be a `(hash, index)` pair, but because - we store blocks by height, storing the height saves one level of indirection. - Transaction hashes can be looked up using `hash_by_tx_loc`. - -- Similarly, UTXOs are stored in `utxo_by_out_loc` by `OutputLocation`, - rather than `OutPoint`. 
`OutPoint`s can be looked up using `tx_loc_by_hash`, - and reconstructed using `hash_by_tx_loc`. - -- The `Utxo` type can be constructed from the `OutputLocation` and `Output` data, - `height: OutputLocation.height`, and - `is_coinbase: OutputLocation.transaction_index == 0` - (coinbase transactions are always the first transaction in a block). - -- `balance_by_transparent_addr` is the sum of all `utxo_loc_by_transparent_addr_loc`s - that are still in `utxo_by_out_loc`. It is cached to improve performance for - addresses with large UTXO sets. It also stores the `AddressLocation` for each - address, which allows for efficient lookups. - -- `utxo_loc_by_transparent_addr_loc` stores unspent transparent output locations - by address. The address location and UTXO location are stored as a RocksDB key, - so they are in chain order, and get good database performance. - This column family includes also includes the original address location UTXO, - if it has not been spent. - -- When a block write deletes a UTXO from `utxo_by_out_loc`, - that UTXO location should be deleted from `utxo_loc_by_transparent_addr_loc`. - The deleted UTXO can be removed efficiently, because the UTXO location is part of the key. - This is an index optimisation, which does not affect query results. - -- `tx_loc_by_transparent_addr_loc` stores transaction locations by address. - This list includes transactions containing spent UTXOs. - The address location and transaction location are stored as a RocksDB key, - so they are in chain order, and get good database performance. - This column family also includes the `TransactionLocation` - of the transaction for the `AddressLocation`. - -- The `sprout_note_commitment_tree` stores the note commitment tree state - at the tip of the finalized state, for the specific pool. There is always - a single entry. Each tree is stored - as a "Merkle tree frontier" which is basically a (logarithmic) subset of - the Merkle tree nodes as required to insert new items. - For each block committed, the old tree is deleted and a new one is inserted - by its new height. - **TODO:** store the sprout note commitment tree by `()`, - to avoid ReadStateService concurrent write issues. - -- The `{sapling, orchard}_note_commitment_tree` stores the note commitment tree - state for every height, for the specific pool. Each tree is stored - as a "Merkle tree frontier" which is basically a (logarithmic) subset of - the Merkle tree nodes as required to insert new items. - -- `history_tree` stores the ZIP-221 history tree state at the tip of the finalized - state. There is always a single entry for it. The tree is stored as the set of "peaks" - of the "Merkle mountain range" tree structure, which is what is required to - insert new items. - **TODO:** store the history tree by `()`, to avoid ReadStateService concurrent write issues. - -- Each `*_anchors` stores the anchor (the root of a Merkle tree) of the note commitment - tree of a certain block. We only use the keys since we just need the set of anchors, - regardless of where they come from. The exception is `sprout_anchors` which also maps - the anchor to the matching note commitment tree. This is required to support interstitial - treestates, which are unique to Sprout. - **TODO:** store the `Root` hash in `sprout_note_commitment_tree`, and use it to look up the - note commitment tree. This de-duplicates tree state data. But we currently only store one sprout tree by height. - -- The value pools are only stored for the finalized tip. 
- -- We do not store the cumulative work for the finalized chain, - because the finalized work is equal for all non-finalized chains. - So the additional non-finalized work can be used to calculate the relative chain order, - and choose the best chain. +The current database format is documented in [Upgrading the State Database](../state-db-upgrades.md). ## Committing finalized blocks diff --git a/book/src/dev/rfcs/0011-async-rust-in-zebra.md b/book/src/dev/rfcs/0011-async-rust-in-zebra.md index b3c1212032c..000bb2cbd5a 100644 --- a/book/src/dev/rfcs/0011-async-rust-in-zebra.md +++ b/book/src/dev/rfcs/0011-async-rust-in-zebra.md @@ -14,7 +14,7 @@ with the [tokio](https://docs.rs/tokio/) executor. At a higher level, Zebra also uses [`tower::Service`s](https://docs.rs/tower/0.4.1/tower/trait.Service.html), [`tower::Buffer`s](https://docs.rs/tower/0.4.1/tower/buffer/struct.Buffer.html), -and our own [`tower-batch`](https://github.com/ZcashFoundation/zebra/tree/main/tower-batch) +and our own [`tower-batch-control`](https://github.com/ZcashFoundation/zebra/tree/main/tower-batch-control) implementation. # Motivation @@ -737,7 +737,7 @@ particularly important for code that modifies Zebra's highly concurrent crates: - `zebra-network` - `zebra-state` - `zebra-consensus` -- `tower-batch` +- `tower-batch-control` - `tower-fallback` ## Monitoring Async Code diff --git a/book/src/dev/state-db-upgrades.md b/book/src/dev/state-db-upgrades.md new file mode 100644 index 00000000000..2174aba24ec --- /dev/null +++ b/book/src/dev/state-db-upgrades.md @@ -0,0 +1,356 @@ +# Zebra Cached State Database Implementation + +## Upgrading the State Database + +For most state upgrades, we want to modify the database format of the existing database. If we +change the major database version, every user needs to re-download and re-verify all the blocks, +which can take days. + +### In-Place Upgrade Goals + +- avoid a full download and rebuild of the state +- the previous state format must be able to be loaded by the new state + - this is checked the first time CI runs on a PR with a new state version. + After the first CI run, the cached state is marked as upgraded, so the upgrade doesn't run + again. If CI fails on the first run, any cached states with that version should be deleted. +- previous zebra versions should be able to load the new format + - this is checked by other PRs running using the upgraded cached state, but only if a Rust PR + runs after the new PR's CI finishes, but before it merges +- best-effort loading of older supported states by newer Zebra versions +- best-effort compatibility between newer states and older supported Zebra versions + +### Design Constraints +[design]: #design + +Upgrades run concurrently with state verification and RPC requests. 
+
+This means that:
+- the state must be able to read the old and new formats
+  - it can't panic if the data is missing
+  - it can't give incorrect results, because that can affect verification or wallets
+  - it can return an error
+  - it can only return an `Option` if the caller handles it correctly
+- multiple upgrades must produce a valid state format
+  - if Zebra is restarted, the format upgrade will run multiple times
+  - if an older Zebra version opens the state, data can be written in an older format
+- the format must be valid before and after each database transaction or API call, because an upgrade can be cancelled at any time
+  - multi-column family changes should be made in database transactions
+  - if you are building a new column family, disable state queries, then enable them once it's done
+  - if each database API call produces a valid format, transactions aren't needed
+
+If there is an upgrade failure, it can panic and tell the user to delete their cached state and re-launch Zebra.
+
+### Implementation Steps
+
+- [ ] update the [database format](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/state-db-upgrades.md#current) in the Zebra docs
+- [ ] increment the state minor version
+- [ ] write the new format in the block write task
+- [ ] update older formats in the format upgrade task
+- [ ] test that the new format works when creating a new state, and updating an older state
+
+See the [upgrade design docs](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/state-db-upgrades.md#design) for more details.
+
+These steps can be copied into tickets.
+
+## Current State Database Format
+[current]: #current
+
+rocksdb provides a persistent, thread-safe `BTreeMap<&[u8], &[u8]>`. Each map is
+a distinct "tree". Keys are sorted using lexicographic order (`[u8].sorted()`) on byte strings, so
+integer values should be stored using big-endian encoding (so that the lex
+order on byte strings is the numeric ordering).
+
+Note that the lex order storage allows creating 1-to-many maps using keys only.
+For example, the `tx_loc_by_transparent_addr_loc` column family allows mapping each address
+to all transactions related to it, by simply storing each transaction prefixed
+with the address as the key, leaving the value empty. Since rocksdb allows
+listing all keys with a given prefix, it can list all transactions
+related to a given address, as sketched below.
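+
+As an illustration only (this is not Zebra's real code), the keys-only 1-to-many
+pattern can be sketched with a std `BTreeMap`, which has the same sorted-key
+semantics as a rocksdb column family. The 8-byte address locations and 5-byte
+transaction locations are made-up values that follow the sizes listed below:
+
+```rust
+use std::collections::BTreeMap;
+
+fn main() {
+    // A keys-only column family: the whole record is in the key, the value is empty.
+    let mut tx_loc_by_addr_loc: BTreeMap<Vec<u8>, ()> = BTreeMap::new();
+
+    // A made-up 8-byte address location, and two made-up 5-byte transaction locations.
+    let addr_loc = [7u8; 8];
+    for tx_loc in [[0u8, 0, 1, 0, 0], [0, 0, 2, 0, 3]] {
+        let mut key = addr_loc.to_vec();
+        key.extend_from_slice(&tx_loc);
+        tx_loc_by_addr_loc.insert(key, ());
+    }
+
+    // "List all keys with a given prefix": lexicographic order groups all of an
+    // address's transactions together, so a range scan plus starts_with works.
+    let tx_locs: Vec<Vec<u8>> = tx_loc_by_addr_loc
+        .range(addr_loc.to_vec()..)
+        .take_while(|(key, _)| key.starts_with(&addr_loc))
+        .map(|(key, _)| key[8..].to_vec())
+        .collect();
+
+    assert_eq!(tx_locs.len(), 2);
+}
+```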
+ +We use the following rocksdb column families: + +| Column Family | Keys | Values | Changes | +| ---------------------------------- | ---------------------- | ----------------------------- | ------- | +| *Blocks* | | | | +| `hash_by_height` | `block::Height` | `block::Hash` | Create | +| `height_by_hash` | `block::Hash` | `block::Height` | Create | +| `block_header_by_height` | `block::Height` | `block::Header` | Create | +| *Transactions* | | | | +| `tx_by_loc` | `TransactionLocation` | `Transaction` | Create | +| `hash_by_tx_loc` | `TransactionLocation` | `transaction::Hash` | Create | +| `tx_loc_by_hash` | `transaction::Hash` | `TransactionLocation` | Create | +| *Transparent* | | | | +| `balance_by_transparent_addr` | `transparent::Address` | `Amount \|\| AddressLocation` | Update | +| `tx_loc_by_transparent_addr_loc` | `AddressTransaction` | `()` | Create | +| `utxo_by_out_loc` | `OutputLocation` | `transparent::Output` | Delete | +| `utxo_loc_by_transparent_addr_loc` | `AddressUnspentOutput` | `()` | Delete | +| *Sprout* | | | | +| `sprout_nullifiers` | `sprout::Nullifier` | `()` | Create | +| `sprout_anchors` | `sprout::tree::Root` | `sprout::NoteCommitmentTree` | Create | +| `sprout_note_commitment_tree` | `block::Height` | `sprout::NoteCommitmentTree` | Delete | +| *Sapling* | | | | +| `sapling_nullifiers` | `sapling::Nullifier` | `()` | Create | +| `sapling_anchors` | `sapling::tree::Root` | `()` | Create | +| `sapling_note_commitment_tree` | `block::Height` | `sapling::NoteCommitmentTree` | Create | +| *Orchard* | | | | +| `orchard_nullifiers` | `orchard::Nullifier` | `()` | Create | +| `orchard_anchors` | `orchard::tree::Root` | `()` | Create | +| `orchard_note_commitment_tree` | `block::Height` | `orchard::NoteCommitmentTree` | Create | +| *Chain* | | | | +| `history_tree` | `block::Height` | `NonEmptyHistoryTree` | Delete | +| `tip_chain_value_pool` | `()` | `ValueBalance` | Update | + +Zcash structures are encoded using `ZcashSerialize`/`ZcashDeserialize`. +Other structures are encoded using `IntoDisk`/`FromDisk`. + +Block and Transaction Data: +- `Height`: 24 bits, big-endian, unsigned (allows for ~30 years worth of blocks) +- `TransactionIndex`: 16 bits, big-endian, unsigned (max ~23,000 transactions in the 2 MB block limit) +- `TransactionCount`: same as `TransactionIndex` +- `TransactionLocation`: `Height \|\| TransactionIndex` +- `OutputIndex`: 24 bits, big-endian, unsigned (max ~223,000 transfers in the 2 MB block limit) +- transparent and shielded input indexes, and shielded output indexes: 16 bits, big-endian, unsigned (max ~49,000 transfers in the 2 MB block limit) +- `OutputLocation`: `TransactionLocation \|\| OutputIndex` +- `AddressLocation`: the first `OutputLocation` used by a `transparent::Address`. + Always has the same value for each address, even if the first output is spent. +- `Utxo`: `Output`, derives extra fields from the `OutputLocation` key +- `AddressUnspentOutput`: `AddressLocation \|\| OutputLocation`, + used instead of a `BTreeSet` value, to improve database performance +- `AddressTransaction`: `AddressLocation \|\| TransactionLocation` + used instead of a `BTreeSet` value, to improve database performance + +We use big-endian encoding for keys, to allow database index prefix searches. 
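+
+As a hedged sketch of these encoding rules (the helper below is hypothetical, not
+Zebra's actual API), a `TransactionLocation` key is a 3-byte big-endian `Height`
+followed by a 2-byte big-endian `TransactionIndex`, and big-endian keys make byte
+order match numeric order:
+
+```rust
+/// Encode a 5-byte `TransactionLocation` key: `Height || TransactionIndex`.
+fn transaction_location_key(height: u32, tx_index: u16) -> [u8; 5] {
+    let mut key = [0u8; 5];
+    // Height is 24 bits: keep the low three bytes of the big-endian u32.
+    key[..3].copy_from_slice(&height.to_be_bytes()[1..]);
+    key[3..].copy_from_slice(&tx_index.to_be_bytes());
+    key
+}
+
+fn main() {
+    let earlier = transaction_location_key(1_000, 2);
+    let later = transaction_location_key(70_000, 0);
+
+    // Lexicographic byte order matches numeric order, so range scans and
+    // database index prefix searches work.
+    assert!(earlier < later);
+
+    // All transactions in block 70,000 share the same 3-byte key prefix.
+    assert_eq!(later[..3], 70_000u32.to_be_bytes()[1..]);
+}
+```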
+
+Amounts:
+- `Amount`: 64 bits, little-endian, signed
+- `ValueBalance`: `[Amount; 4]`
+
+Derived Formats:
+- `*::NoteCommitmentTree`: `bincode` using `serde`
+- `NonEmptyHistoryTree`: `bincode` using `serde`, using `zcash_history`'s `serde` implementation
+
+The following figure helps visualize the address index, which is the most complicated part.
+Numbers in brackets are array sizes; bold arrows are compositions (i.e. `TransactionLocation` is the
+concatenation of `Height` and `TransactionIndex`); dashed arrows are compositions that are also 1-to-many
+maps (i.e. `AddressTransaction` is the concatenation of `AddressLocation` and `TransactionLocation`,
+but is also used to map each `AddressLocation` to multiple `TransactionLocation`s).
+
+```mermaid
+graph TD;
+    Address -->|"balance_by_transparent_addr
"| AddressBalance; + AddressBalance ==> Amount; + AddressBalance ==> AddressLocation; + AddressLocation ==> FirstOutputLocation; + AddressLocation -.->|"tx_loc_by_transparent_addr_loc
(AddressTransaction[13])"| TransactionLocation;
+    TransactionLocation ==> Height;
+    TransactionLocation ==> TransactionIndex;
+    OutputLocation -->|utxo_by_out_loc| Output;
+    OutputLocation ==> TransactionLocation;
+    OutputLocation ==> OutputIndex;
+    AddressLocation -.->|"utxo_loc_by_transparent_addr_loc
(AddressUnspentOutput[16])"| OutputLocation;
+
+    AddressBalance["AddressBalance[16]"];
+    Amount["Amount[8]"];
+    Height["Height[3]"];
+    Address["Address[21]"];
+    TransactionIndex["TransactionIndex[2]"];
+    TransactionLocation["TransactionLocation[5]"];
+    OutputIndex["OutputIndex[3]"];
+    OutputLocation["OutputLocation[8]"];
+    FirstOutputLocation["First OutputLocation[8]"];
+    AddressLocation["AddressLocation[8]"];
+```
+
+### Implementing consensus rules using rocksdb
+[rocksdb-consensus-rules]: #rocksdb-consensus-rules
+
+Each column family handles updates differently, based on its specific consensus rules:
+- Create:
+  - Each key-value entry is created once.
+  - Keys are never deleted, values are never updated.
+- Delete:
+  - Each key-value entry is created once.
+  - Keys can be deleted, but values are never updated.
+  - Code called by ReadStateService must ignore deleted keys, or use a read lock.
+  - TODO: should we prevent re-inserts of keys that have been deleted?
+- Update:
+  - Each key-value entry is created once.
+  - Keys are never deleted, but values can be updated.
+  - Code called by ReadStateService must handle old or new values, or use a read lock.
+
+We can't do some kinds of value updates, because they cause RocksDB performance issues:
+- Append:
+  - Keys are never deleted.
+  - Existing values are never updated.
+  - Sets of values have additional items appended to the end of the set.
+  - Code called by ReadStateService must handle shorter or longer sets, or use a read lock.
+- Up/Del:
+  - Keys can be deleted.
+  - Sets of values have items added or deleted (in any position).
+  - Code called by ReadStateService must ignore deleted keys and values,
+    accept shorter or longer sets, and accept old or new values.
+    Or it should use a read lock.
+
+Avoid using large sets of values as RocksDB keys or values.
+
+### RocksDB read locks
+[rocksdb-read-locks]: #rocksdb-read-locks
+
+The read-only ReadStateService needs to handle concurrent writes and deletes of the finalized
+column families it reads. It must also handle overlaps between the cached non-finalized `Chain`,
+and the current finalized state database.
+
+The StateService uses RocksDB transactions for each block write.
+So ReadStateService queries that only access a single key or value will always see
+a consistent view of the database.
+
+If a ReadStateService query only uses column families that only have keys and values appended
+(`Create` in the Changes column of the table above), it should ignore extra appended values.
+Most queries do this by default.
+
+For more complex queries, there are several options:
+
+Reading across multiple column families:
+1. Ignore deleted values using custom Rust code
+2. Take a database snapshot - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.snapshot
+
+Reading a single column family:
+3. multi_get - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.multi_get_cf
+4. iterator - https://docs.rs/rocksdb/latest/rocksdb/struct.DBWithThreadMode.html#method.iterator_cf
+
+RocksDB also has read transactions, but they don't seem to be exposed in the Rust crate.
+
+### Low-Level Implementation Details
+[rocksdb-low-level]: #rocksdb-low-level
+
+RocksDB ignores duplicate puts and deletes, preserving the latest values.
+If rejecting duplicate puts or deletes is consensus-critical,
+check [`db.get_cf(cf, key)?`](https://docs.rs/rocksdb/0.16.0/rocksdb/struct.DBWithThreadMode.html#method.get_cf)
+before putting or deleting any values in a batch.
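+
+As a minimal sketch of that check (using the upstream `rocksdb` crate API from
+recent versions directly; `put_if_absent` is a hypothetical helper, not Zebra's
+real wrapper code):
+
+```rust
+use rocksdb::{Options, WriteBatch, DB};
+
+/// Only put `key` in a batch if it is not already present in the column family.
+fn put_if_absent(db: &DB, cf_name: &str, key: &[u8], value: &[u8]) -> Result<bool, rocksdb::Error> {
+    let cf = db.cf_handle(cf_name).expect("column family was opened");
+
+    // RocksDB silently keeps the latest duplicate put, so a consensus-critical
+    // "create once" rule needs this explicit check.
+    if db.get_cf(cf, key)?.is_some() {
+        return Ok(false);
+    }
+
+    let mut batch = WriteBatch::default();
+    batch.put_cf(cf, key, value);
+    db.write(batch)?;
+    Ok(true)
+}
+
+fn main() -> Result<(), rocksdb::Error> {
+    let mut opts = Options::default();
+    opts.create_if_missing(true);
+    opts.create_missing_column_families(true);
+
+    // Re-running this example needs a fresh database directory.
+    let db = DB::open_cf(&opts, "/tmp/zebra-example-db", ["example_cf"])?;
+
+    assert!(put_if_absent(&db, "example_cf", b"key", b"value")?);
+    assert!(!put_if_absent(&db, "example_cf", b"key", b"value")?);
+    Ok(())
+}
+```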
+
+Currently, these restrictions should be enforced by code review:
+- multiple `zs_insert`s are only allowed on Update column families, and
+- [`delete_cf`](https://docs.rs/rocksdb/0.16.0/rocksdb/struct.WriteBatch.html#method.delete_cf)
+  is only allowed on Delete column families.
+
+In future, we could enforce these restrictions by:
+- creating traits for the Create, Delete, and Update column families
+- doing different checks in `zs_insert` depending on the trait
+- wrapping `delete_cf` in a trait, and only implementing that trait for types that use Delete column families.
+
+As of June 2021, the Rust `rocksdb` crate [ignores the delete callback](https://docs.rs/rocksdb/0.16.0/src/rocksdb/merge_operator.rs.html#83-94),
+and merge operators are unreliable (or have undocumented behaviour).
+So they should not be used for consensus-critical checks.
+
+### Notes on rocksdb column families
+[rocksdb-column-families]: #rocksdb-column-families
+
+- The `hash_by_height` and `height_by_hash` column families provide a bijection between
+  block heights and block hashes. (Since the rocksdb state only stores finalized
+  state, they are actually a bijection).
+
+- Similarly, the `tx_loc_by_hash` and `hash_by_tx_loc` column families provide a bijection between
+  transaction locations and transaction hashes.
+
+- The `block_header_by_height` column family provides a bijection between block
+  heights and block header data. There is no corresponding `height_by_block` column
+  family: instead, hash the block header, and use the hash from `height_by_hash`.
+  (Since the rocksdb state only stores finalized state, they are actually a bijection).
+  Similarly, there are no column families that go from transaction data
+  to transaction locations: hash the transaction and use `tx_loc_by_hash`.
+
+- Block headers and transactions are stored separately in the database,
+  so that individual transactions can be accessed efficiently.
+  Blocks can be re-created on request using the following process:
+  - Look up `height` in `height_by_hash`
+  - Get the block header for `height` from `block_header_by_height`
+  - Iterate from `TransactionIndex` 0,
+    to get each transaction with `height` from `tx_by_loc`,
+    stopping when there are no more transactions in the block
+
+- Block headers are stored by height, not by hash. This has the downside that looking
+  up a block by hash requires an extra level of indirection. The upside is
+  that blocks with adjacent heights are adjacent in the database, and many
+  common access patterns, such as helping a client sync the chain or doing
+  analysis, access blocks in (potentially sparse) height order. In addition,
+  the fact that we commit blocks in order means we're writing only to the end
+  of the rocksdb column family, which may help save space.
+
+- Similarly, transaction data is stored in chain order in `tx_by_loc` and `utxo_by_out_loc`,
+  and in chain order within each vector in `utxo_loc_by_transparent_addr_loc` and
+  `tx_loc_by_transparent_addr_loc`.
+
+- `TransactionLocation`s are stored as a `(height, index)` pair referencing the
+  height of the transaction's parent block and the transaction's index in that
+  block. This would more traditionally be a `(hash, index)` pair, but because
+  we store blocks by height, storing the height saves one level of indirection.
+  Transaction hashes can be looked up using `hash_by_tx_loc`.
+
+- Similarly, UTXOs are stored in `utxo_by_out_loc` by `OutputLocation`,
+  rather than `OutPoint`.
`OutPoint`s can be looked up using `tx_loc_by_hash`,
+  and reconstructed using `hash_by_tx_loc`.
+
+- The `Utxo` type can be constructed from the `OutputLocation` and `Output` data,
+  `height: OutputLocation.height`, and
+  `is_coinbase: OutputLocation.transaction_index == 0`
+  (coinbase transactions are always the first transaction in a block).
+
+- `balance_by_transparent_addr` is the sum of all `utxo_loc_by_transparent_addr_loc`s
+  that are still in `utxo_by_out_loc`. It is cached to improve performance for
+  addresses with large UTXO sets. It also stores the `AddressLocation` for each
+  address, which allows for efficient lookups.
+
+- `utxo_loc_by_transparent_addr_loc` stores unspent transparent output locations
+  by address. The address location and UTXO location are stored as a RocksDB key,
+  so they are in chain order, and get good database performance.
+  This column family also includes the original address location UTXO,
+  if it has not been spent.
+
+- When a block write deletes a UTXO from `utxo_by_out_loc`,
+  that UTXO location should be deleted from `utxo_loc_by_transparent_addr_loc`.
+  The deleted UTXO can be removed efficiently, because the UTXO location is part of the key.
+  This is an index optimisation, which does not affect query results.
+
+- `tx_loc_by_transparent_addr_loc` stores transaction locations by address.
+  This list includes transactions containing spent UTXOs.
+  The address location and transaction location are stored as a RocksDB key,
+  so they are in chain order, and get good database performance.
+  This column family also includes the `TransactionLocation`
+  of the transaction for the `AddressLocation`.
+
+- The `sprout_note_commitment_tree` stores the note commitment tree state
+  at the tip of the finalized state, for the specific pool. There is always
+  a single entry. Each tree is stored
+  as a "Merkle tree frontier" which is basically a (logarithmic) subset of
+  the Merkle tree nodes as required to insert new items.
+  For each block committed, the old tree is deleted and a new one is inserted
+  by its new height.
+  **TODO:** store the sprout note commitment tree by `()`,
+  to avoid ReadStateService concurrent write issues.
+
+- The `{sapling, orchard}_note_commitment_tree` stores the note commitment tree
+  state for every height, for the specific pool. Each tree is stored
+  as a "Merkle tree frontier" which is basically a (logarithmic) subset of
+  the Merkle tree nodes as required to insert new items.
+
+- `history_tree` stores the ZIP-221 history tree state at the tip of the finalized
+  state. There is always a single entry for it. The tree is stored as the set of "peaks"
+  of the "Merkle mountain range" tree structure, which is what is required to
+  insert new items.
+  **TODO:** store the history tree by `()`, to avoid ReadStateService concurrent write issues.
+
+- Each `*_anchors` stores the anchor (the root of a Merkle tree) of the note commitment
+  tree of a certain block. We only use the keys since we just need the set of anchors,
+  regardless of where they come from. The exception is `sprout_anchors` which also maps
+  the anchor to the matching note commitment tree. This is required to support interstitial
+  treestates, which are unique to Sprout.
+  **TODO:** store the `Root` hash in `sprout_note_commitment_tree`, and use it to look up the
+  note commitment tree. This de-duplicates tree state data. But we currently only store one sprout tree by height.
+
+- The value pools are only stored for the finalized tip.
+ +- We do not store the cumulative work for the finalized chain, + because the finalized work is equal for all non-finalized chains. + So the additional non-finalized work can be used to calculate the relative chain order, + and choose the best chain. diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 65760f71fa5..0f99bd11287 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -11,13 +11,13 @@ You can deploy Zebra for a daily use with the images available in [Docker Hub](h ### Ready to use image ```shell -docker run --detach zfnd/zebra:1.0.0-rc.8 +docker run --detach zfnd/zebra:latest ``` ### Build it locally ```shell -git clone --depth 1 --branch v1.0.0-rc.8 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v1.1.0 https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . docker run --detach zebra:local ``` @@ -30,4 +30,4 @@ See [Building Zebra](https://github.com/ZcashFoundation/zebra#building-zebra) fo The images built by the Zebra team are all publicly hosted. Old image versions meant to be used by our [CI pipeline](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-integration-docker.yml) (`zebrad-test`, `lighwalletd`) might be deleted on a scheduled basis. -We use [Docker Hub](https://hub.docker.com/r/zfnd/zebra) for end-user images and [Google Artifact Registry](https://console.cloud.google.com/artifacts/docker/zealous-zebra/us/zebra) to build external tools and test images +We use [Docker Hub](https://hub.docker.com/r/zfnd/zebra) for end-user images and [Google Artifact Registry](https://console.cloud.google.com/artifacts/docker/zfnd-dev-zebra/us/zebra) to build external tools and test images diff --git a/book/src/user/grafana.png b/book/src/user/grafana.png new file mode 100644 index 00000000000..fa4e20a954d Binary files /dev/null and b/book/src/user/grafana.png differ diff --git a/book/src/user/install.md b/book/src/user/install.md index fad05d693c2..6bd65ad7a7e 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -1,37 +1,73 @@ # Installing Zebra -Follow the [Docker or compilation instructions in the README](https://github.com/ZcashFoundation/zebra#getting-started). +Follow the [Docker or compilation instructions](https://zebra.zfnd.org/index.html#getting-started). -#### Build Troubleshooting +## Installing Dependencies -If you're having trouble with: +To compile Zebra from source, you will need to [install some dependencies.](https://zebra.zfnd.org/index.html#building-zebra). -Dependencies: -- use `cargo install` without `--locked` to build with the latest versions of each dependency -- **sqlite linker errors:** libsqlite3 is an optional dependency of the `zebra-network/tor` feature. - If you don't have it installed, you might see errors like `note: /usr/bin/ld: cannot find -lsqlite3`. - [Follow the arti instructions](https://gitlab.torproject.org/tpo/core/arti/-/blob/main/CONTRIBUTING.md#setting-up-your-development-environment) - to install libsqlite3, or use one of these commands instead: +## Alternative Compilation Methods + +### Compiling Manually from git + +To compile Zebra directly from GitHub, or from a GitHub release source archive: + +1. Install the dependencies (see above) + +2. 
Get the source code using `git` or from a GitHub source package ```sh -cargo build -cargo build -p zebrad --all-features +git clone https://github.com/ZcashFoundation/zebra.git +cd zebra +git checkout v1.1.0 ``` -Compilers: +3. Build and Run `zebrad` + +```sh +cargo build --release --bin zebrad +target/release/zebrad start +``` + +### Compiling from git using cargo install + +```sh +cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.1.0 zebrad +``` + +### Compiling on ARM + +If you're using an ARM machine, [install the Rust compiler for +ARM](https://rust-lang.github.io/rustup/installation/other.html). If you build +using the x86_64 tools, Zebra might run really slowly. + +## Build Troubleshooting + +If you're having trouble with: + +### Compilers - **clang:** install both `libclang` and `clang` - they are usually different packages - **libclang:** check out the [clang-sys documentation](https://github.com/KyleMayes/clang-sys#dependencies) - **g++ or MSVC++:** try using clang or Xcode instead -- **rustc:** use rustc 1.65 or later +- **rustc:** use the latest stable `rustc` and `cargo` versions - Zebra does not have a minimum supported Rust version (MSRV) policy: any release can update the required Rust version. ### Dependencies -Zebra primarily depends on pure Rust crates, and some Rust/C++ crates: +- use `cargo install` without `--locked` to build with the latest versions of each dependency + +#### Optional Tor feature + +- **sqlite linker errors:** libsqlite3 is an optional dependency of the `zebra-network/tor` feature. + If you don't have it installed, you might see errors like `note: /usr/bin/ld: cannot find -lsqlite3`. + [Follow the arti instructions](https://gitlab.torproject.org/tpo/core/arti/-/blob/main/CONTRIBUTING.md#setting-up-your-development-environment) + to install libsqlite3, or use one of these commands instead: + +```sh +cargo build +cargo build -p zebrad --all-features +``` -- [rocksdb](https://crates.io/crates/rocksdb) -- [zcash_script](https://crates.io/crates/zcash_script) -They will be automatically built along with `zebrad`. diff --git a/book/src/user/lightwalletd.md b/book/src/user/lightwalletd.md index aade5583f0c..cc9c99100af 100644 --- a/book/src/user/lightwalletd.md +++ b/book/src/user/lightwalletd.md @@ -1,12 +1,15 @@ # Running lightwalletd with zebra -Zebra's RPC methods can support a lightwalletd service backed by zebrad. +Zebra's RPC methods can support a lightwalletd service backed by zebrad. We +recommend using +[adityapk00/lightwalletd](https://github.com/adityapk00/lightwalletd) because we +use it in testing. Other `lightwalletd` forks have limited support, see the +[Sync lightwalletd](#sync-lightwalletd) section for more info. Contents: -- [Download and build Zebra](#download-and-build-zebra) - [Configure zebra for lightwalletd](#configure-zebra-for-lightwalletd) - - [RPC section](#rpc-section) + - [JSON-RPC](#json-rpc) - [Sync Zebra](#sync-zebra) - [Download and build lightwalletd](#download-and-build-lightwalletd) - [Sync lightwalletd](#sync-lightwalletd) @@ -15,16 +18,8 @@ Contents: - [Download and build the cli-wallet](#download-and-build-the-cli-wallet) - [Run the wallet](#run-the-wallet) -## Download and build Zebra -[#download-and-build-zebra]: #download-and-build-zebra - -```console -cargo install --locked --git https://github.com/ZcashFoundation/zebra zebrad -``` - -Zebra binary will be at ` ~/.cargo/bin/zebrad`. 
-
 ## Configure zebra for lightwalletd
+
 [#configure-zebra-for-lightwalletd]: #configure-zebra-for-lightwalletd
 
 We need a zebra configuration file. First, we create a file with the default settings:
@@ -37,17 +32,33 @@ The above command places the generated `zebrad.toml` config file in the default
 
 Tweak the following option in order to prepare for lightwalletd setup.
 
-### RPC section
-[#rpc-section]: #rpc-section
+### JSON-RPC
 
-This change is required for zebra to behave as an RPC endpoint. The standard port for RPC endpoint is `8232`.
+[#rpc-section]: #json-rpc
 
-```
+We need to configure Zebra to behave as an RPC endpoint. The standard RPC port
+for Zebra is:
+
+- `8232` for Mainnet, and
+- `18232` for Testnet.
+
+For example, to use Zebra as a `lightwalletd` backend on Mainnet, give it this
+`~/.config/zebrad.toml`:
+
+```toml
 [rpc]
-listen_addr = "127.0.0.1:8232"
+# listen for RPC queries on localhost
+listen_addr = '127.0.0.1:8232'
+
+# automatically use multiple CPU threads
+parallel_cpu_threads = 0
 ```
 
+**WARNING:** This config allows multiple Zebra instances to share the same RPC port.
+See the [RPC config documentation](https://doc.zebra.zfnd.org/zebra_rpc/config/struct.Config.html) for details.
+
 ## Sync Zebra
+
 [#sync-zebra]: #sync-zebra
 
 With the configuration in place you can start synchronizing Zebra with the Zcash blockchain. This may take a while depending on your hardware.
diff --git a/book/src/user/metrics.md b/book/src/user/metrics.md
index c2fb19ed742..030f67c1df0 100644
--- a/book/src/user/metrics.md
+++ b/book/src/user/metrics.md
@@ -3,20 +3,31 @@ Zebra has support for Prometheus, configured using the `prometheus` compile-time
 feature, and the [`MetricsSection`][metrics_section] runtime configuration.
 
-This requires supporting infrastructure to collect and visualize metrics, for example:
+The following steps can be used to send real-time Zebra metrics data into a grafana
+front end where you can visualize them:
 
-1. Create the `zebrad.toml` file with the following contents:
+1. Build zebra with the `prometheus` feature:
+   ```
+   cargo install --features prometheus --locked --git https://github.com/ZcashFoundation/zebra zebrad
+   ```
+
+2. Create a `zebrad.toml` file that we can edit:
+   ```
+   zebrad generate -o zebrad.toml
+   ```
+
+3. Add `endpoint_addr` to the `metrics` section:
    ```
    [metrics]
    endpoint_addr = "127.0.0.1:9999"
   ```
 
-2. Run Zebra, and specify the path to the `zebrad.toml` file, for example:
+4. Run Zebra, and specify the path to the `zebrad.toml` file, for example:
   ```
   zebrad -c zebrad.toml start
   ```
 
-3. Install and run Prometheus and Grafana via Docker:
+5. Install and run Prometheus and Grafana via Docker:
 
   ```
   # create a storage volume for grafana (once)
@@ -34,13 +45,15 @@ This requires supporting infrastructure to collect and visualize metrics, for ex
 
 Now the grafana dashboard is available at [http://localhost:3030](http://localhost:3030) ;
 the default username and password is `admin`/`admin`.
 Prometheus scrapes Zebra on `localhost:9999`, and provides the results on `localhost:9090`.
 
-4. Configure Grafana with a Prometheus HTTP Data Source, using Zebra's `metrics.endpoint_addr`.
+6. Configure Grafana with a Prometheus HTTP Data Source, using Zebra's `metrics.endpoint_addr`.
   In the grafana dashboard:
   1. Create a new Prometheus Data Source `Prometheus-Zebra`
   2. Enter the HTTP URL: `127.0.0.1:9090`
   3. Save the configuration
 
-5. Now you can add the grafana dashboards from `zebra/grafana` (Create > Import > Upload JSON File), or create your own.
+7.
Now you can add the grafana dashboards from `zebra/grafana` (Create > Import > Upload JSON File), or create your own. + +![image info](grafana.png) [metrics_section]: https://doc.zebra.zfnd.org/zebrad/config/struct.MetricsSection.html diff --git a/book/src/user/mining-docker.md b/book/src/user/mining-docker.md new file mode 100644 index 00000000000..e5d974317b8 --- /dev/null +++ b/book/src/user/mining-docker.md @@ -0,0 +1,45 @@ +# Mining with Zebra in Docker + +Some of our published [Docker images](https://hub.docker.com/r/zfnd/zebra/tags) +have the `.experimental` suffix in their name. We compile these images with the +`getblocktemplate-rpcs` feature, and you can use them for your mining +operations. For example, executing + +```bash +docker run -e MINER_ADDRESS="t1XhG6pT9xRqRQn3BHP7heUou1RuYrbcrCc" -p 8232:8232 zfnd/zebra:v1.1.0.experimental +``` + +will start a container on Mainnet and bind port 8232 on your Docker host. If you +want to start generating blocks, you need to let Zebra sync first. + +Note that you must pass the address for your mining rewards via the +`MINER_ADDRESS` environment variable when you are starting the container, as we +did in the example above. The address we used starts with the prefix `t1`, +meaning it is a Mainnet P2PKH address. Please remember to set your own address +for the rewards. + +The port we mapped between the container and the host with the `-p` flag in the +example above is Zebra's default Mainnet RPC port. If you want to use a +different one, you can specify it in the `RPC_PORT` environment variable, +similarly to `MINER_ADDRESS`, and then map it with the Docker's `-p` flag. + +Instead of listing the environment variables on the command line, you can use +Docker's `--env-file` flag to specify a file containing the variables. You +can find more info here +https://docs.docker.com/engine/reference/commandline/run/#env. + +## Mining on Testnet + +If you want to mine on Testnet, you need to set the `NETWORK` environment +variable to `Testnet` and use a Testnet address for the rewards. For example, +running + +```bash +docker run -e NETWORK="Testnet" -e MINER_ADDRESS="t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v" -p 18232:18232 zfnd/zebra:v1.1.0.experimental +``` + +will start a container on Testnet and bind port 18232 on your Docker host, which +is the standard Testnet RPC port. Notice that we also used a different rewards +address. It starts with the prefix `t2`, indicating that it is a Testnet +address. A Mainnet address would prevent Zebra from starting on Testnet, and +conversely, a Testnet address would prevent Zebra from starting on Mainnet. diff --git a/book/src/user/requirements.md b/book/src/user/requirements.md index 70ee6b00c5b..d12b16ce338 100644 --- a/book/src/user/requirements.md +++ b/book/src/user/requirements.md @@ -30,13 +30,20 @@ Zebra uses the following inbound and outbound TCP ports: - 8233 on Mainnet - 18233 on Testnet -Outbound connections are required to sync, inbound connections are optional. -Zebra also needs access to the Zcash DNS seeders, via the OS DNS resolver -(usually port 53). +If you configure Zebra with a specific +[`listen_addr`](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.listen_addr), +it will advertise this address to other nodes for inbound connections. Outbound +connections are required to sync, inbound connections are optional. Zebra also +needs access to the Zcash DNS seeders, via the OS DNS resolver (usually port +53). 
-The typical Mainnet network usage is: +Zebra makes outbound connections to peers on any port. But `zcashd` prefers +peers on the default ports, so that it can't be used for DDoS attacks on other +networks. -- Initial sync: 300 GB download, as already noted, we expect the initial +### Typical Mainnet Network Usage + +- Initial sync: 300 GB download. As already noted, we expect the initial download to grow. - Ongoing updates: 10 MB - 10 GB upload and download per day, depending on user-created transaction size and peer requests. @@ -51,40 +58,3 @@ ticket.](https://github.com/ZcashFoundation/zebra/issues/new/choose) ## Sentry Production Monitoring Compile Zebra with `--features sentry` to monitor it using Sentry in production. - -# Troubleshooting - -We continuously test that our builds and tests pass on the _latest_ [GitHub -Runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources) -for: - -- macOS, -- Ubuntu, -- Docker: - - Debian Bullseye. - -## Memory Issues - -- If Zebra's build runs out of RAM, try setting `export CARGO_BUILD_JOBS=2`. -- If Zebra's tests timeout or run out of RAM, try running `cargo test -- --test-threads=2`. Note that `cargo` uses all processor cores on your machine - by default. - -## Network Issues - -- Some of Zebra's tests download Zcash blocks, so they might be unreliable - depending on your network connection. You can set `ZEBRA_SKIP_NETWORK_TESTS=1` - to skip the network tests. -- Zebra may be unreliable on Testnet, and under less-than-perfect network - conditions. See our [future - work](https://github.com/ZcashFoundation/zebra#future-work) for details. - -## Issues with Tests on macOS - -Some of Zebra's tests deliberately cause errors that make Zebra panic. macOS -records these panics as crash reports. If you are seeing "Crash Reporter" -dialogs during Zebra tests, you can disable them using this Terminal.app -command: - -```sh -defaults write com.apple.CrashReporter DialogType none -``` diff --git a/book/src/user/run.md b/book/src/user/run.md index 2ee478996b6..8d383db60f6 100644 --- a/book/src/user/run.md +++ b/book/src/user/run.md @@ -9,7 +9,12 @@ The configuration format is the TOML encoding of the internal config structure, and documentation for all of the config options can be found [here](https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html). -* `zebrad start` starts a full node. +- `zebrad start` starts a full node. + +You can run Zebra as a: + +- [`lightwalletd` backend](https://zebra.zfnd.org/user/lightwalletd.html), and +- experimental [mining backend](https://zebra.zfnd.org/user/mining.html). ## Supported versions @@ -21,79 +26,3 @@ Always run a supported version of Zebra, and upgrade it regularly, so it doesn't - `1`: Application exited unsuccessfully - `2`: Application crashed - `zebrad` may also return platform-dependent codes. - -## Network Ports and Data Usage - -`zebrad`'s default ports and network usage are -[documented in the README.](https://github.com/ZcashFoundation/zebra#network-ports-and-data-usage) - -If Zebra is configured with a specific [`listen_addr`](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.listen_addr), -it will advertise this address to other nodes for inbound connections. - -Zebra makes outbound connections to peers on any port. -But `zcashd` prefers peers on the default ports, -so that it can't be used for DDoS attacks on other networks. 
- -The major constraint we've found on `zebrad` performance is the network weather, -especially the ability to make good connections to other Zcash network peers. - -Zebra needs some peers which have a round-trip latency of 2 seconds or less. -If this is a problem for you, please let us know! - -## Improving Performance - -Zebra usually syncs in around a day, depending on your network connection, and the overall Zcash network load. - -If you're having trouble syncing, try the following config changes: - -### Release Build - -Make sure you're using a release build on your native architecture. - -If you're using an ARM machine, -[install the Rust compiler for ARM](https://rust-lang.github.io/rustup/installation/other.html). -If you build using the x86_64 tools, Zebra might run really slowly. - -Run a release build using the -[`cargo install` command from the README.](https://github.com/ZcashFoundation/zebra#build-and-run-instructions) - -### Syncer Lookahead Limit - -If your connection is slow, try -[downloading fewer blocks at a time](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html#structfield.lookahead_limit): - -```toml -[sync] -lookahead_limit = 1000 -max_concurrent_block_requests = 25 -``` - -### Peer Set Size - -If your connection is slow, try [connecting to fewer peers](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.peerset_initial_target_size): - -```toml -[network] -peerset_initial_target_size = 25 -``` - -### Turn off debug logging - -Zebra logs at info level by default. - -If Zebra is slow, make sure it is logging at info level: - -```toml -[tracing] -filter = 'info' -``` - -Or restrict debug logging to a specific Zebra component: - -```toml -[tracing] -filter = 'info,zebra_network=debug' -``` - -If you keep on seeing multiple info logs per second, please -[open a bug.](https://github.com/ZcashFoundation/zebra/issues/new/choose) diff --git a/book/src/user/supported-platforms.md b/book/src/user/supported-platforms.md index 07d7d2e970a..152be56537e 100644 --- a/book/src/user/supported-platforms.md +++ b/book/src/user/supported-platforms.md @@ -32,7 +32,6 @@ For the full requirements, see [Tier 2 platform policy](platform-tier-policy.md# | platform | os | notes | rust | artifacts | -------|-------|-------|-------|------- -| `x86_64-apple-darwin` | [GitHub macos-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A | `x86_64-unknown-linux-gnu` | [GitHub ubuntu-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A | `x86_64-unknown-linux-gnu` | [GitHub ubuntu-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest beta release](https://github.com/rust-lang/rust/blob/beta/src/version) | N/A @@ -47,3 +46,5 @@ For the full requirements, see [Tier 3 platform policy](platform-tier-policy.md# | platform | os | notes | rust | artifacts | -------|-------|-------|-------|------- | `aarch64-unknown-linux-gnu` | [Debian 11](https://www.debian.org/releases/bullseye/) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A +| `x86_64-apple-darwin` | [GitHub macos-latest](https://github.com/actions/virtual-environments#available-environments) | 64-bit | [latest stable release](https://github.com/rust-lang/rust/releases) | N/A + diff --git 
a/book/src/user/troubleshooting.md b/book/src/user/troubleshooting.md new file mode 100644 index 00000000000..b1a89d43576 --- /dev/null +++ b/book/src/user/troubleshooting.md @@ -0,0 +1,87 @@ +# Troubleshooting + +We continuously test that our builds and tests pass on the _latest_ [GitHub +Runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources) +for: + +- macOS, +- Ubuntu, +- Docker: + - Debian Bullseye. + +## Memory Issues + +- If Zebra's build runs out of RAM, try setting `export CARGO_BUILD_JOBS=2`. +- If Zebra's tests timeout or run out of RAM, try running `cargo test -- --test-threads=2`. Note that `cargo` uses all processor cores on your machine + by default. + +## Network Issues + +Some of Zebra's tests download Zcash blocks, so they might be unreliable +depending on your network connection. You can set `ZEBRA_SKIP_NETWORK_TESTS=1` +to skip the network tests. + +## Issues with Tests on macOS + +Some of Zebra's tests deliberately cause errors that make Zebra panic. macOS +records these panics as crash reports. If you are seeing "Crash Reporter" +dialogs during Zebra tests, you can disable them using this Terminal.app +command: + +```sh +defaults write com.apple.CrashReporter DialogType none +``` + +## Improving Performance + +Zebra usually syncs in around three days on Mainnet and half a day on +Testnet. The sync speed depends on your network connection and the overall Zcash +network load. The major constraint we've found on `zebrad` performance is the +network weather, especially the ability to make good connections to other Zcash +network peers. If you're having trouble syncing, try the following config +changes. + +### Release Build + +Make sure you're using a release build on your native architecture. + +### Syncer Lookahead Limit + +If your connection is slow, try +[downloading fewer blocks at a time](https://doc.zebra.zfnd.org/zebrad/config/struct.SyncSection.html#structfield.lookahead_limit): + +```toml +[sync] +lookahead_limit = 1000 +max_concurrent_block_requests = 25 +``` + +### Peer Set Size + +If your connection is slow, try [connecting to fewer peers](https://doc.zebra.zfnd.org/zebra_network/struct.Config.html#structfield.peerset_initial_target_size): + +```toml +[network] +peerset_initial_target_size = 25 +``` + +### Turn off debug logging + +Zebra logs at info level by default. + +If Zebra is slow, make sure it is logging at info level: + +```toml +[tracing] +filter = 'info' +``` + +Or restrict debug logging to a specific Zebra component: + +```toml +[tracing] +filter = 'info,zebra_network=debug' +``` + +If you keep on seeing multiple info logs per second, please +[open a bug.](https://github.com/ZcashFoundation/zebra/issues/new/choose) diff --git a/deny.toml b/deny.toml index 721cadef5bc..5f91c7d266f 100644 --- a/deny.toml +++ b/deny.toml @@ -10,6 +10,13 @@ [bans] # Lint level for when multiple versions of the same crate are detected multiple-versions = "deny" + +# Don't allow wildcard ("any version") dependencies +wildcards = "deny" +# Allow private and dev wildcard dependencies. +# Switch this to `false` when #6924 is implemented. +allow-wildcard-paths = true + # The graph highlighting used when creating dotgraphs for crates # with multiple versions # * lowest-version - The path to the lowest versioned duplicate is highlighted @@ -17,12 +24,18 @@ multiple-versions = "deny" # * all - Both lowest-version and simplest-path are used highlight = "all" -# We don't use this for Zebra. 
-# # List of crates that are allowed. Use with care! #allow = [ #] +# List of crates that can never become Zebra dependencies. +deny = [ + # Often has memory safety vulnerabilities. + # Enabled by --all-features, use the `cargo hack` script in the deny.toml CI job instead. + { name = "openssl" }, + { name = "openssl-sys" }, +] + # We only use this for some `librustzcash` and `orchard` crates. # If we add a crate here, duplicate dependencies of that crate are still shown. # @@ -44,24 +57,27 @@ skip-tree = [ # wait for prost-build to upgrade { name = "prettyplease", version = "=0.1.25" }, - # ZF crates + # wait for criterion to upgrade + { name = "itertools", version = "=0.10.5" }, - # wait for zcashd and zcash_script to upgrade - # https://github.com/ZcashFoundation/zcash_script/pulls - { name = "metrics", version = "=0.20.1" }, - { name = "sha2", version = "=0.9.9" }, + # wait for h2 and tower to upgrade + { name = "indexmap", version = "=1.9.3" }, - # wait for ed25519-zebra, indexmap, metrics-util, and metrics to upgrade - # ed25519-zebra/hashbrown: https://github.com/ZcashFoundation/ed25519-zebra/pull/65 - { name = "ahash", version = "=0.7.6" }, + # wait for rocksdb to upgrade + { name = "bindgen", version = "=0.65.1" }, - # wait for ed25519-zebra to upgrade - { name = "curve25519-dalek", version = "=3.2.0" }, + # wait for tempfile to upgrade + { name = "rustix", version = "=0.37.23" }, + + # ZF crates + + # wait for indexmap, toml_edit, serde_json, tower to upgrade + { name = "hashbrown", version = "=0.12.3" }, # ECC crates - # wait for zcash_primitives to remove duplicated dependencies - { name = "block-buffer", version = "=0.9.0" }, + # wait for minreq and zcash_proofs to upgrade + { name = "rustls", version = "=0.20.8" }, # zebra-utils dependencies @@ -87,19 +103,12 @@ skip-tree = [ # Optional dependencies # upgrade abscissa (required dependency) and arti (optional dependency) - { name = "darling", version = "=0.10.2" }, { name = "semver", version = "=0.9.0" }, - { name = "tracing-subscriber", version = "=0.1.6" }, # Elasticsearch dependencies # wait for elasticsearch to update base64, darling, rustc_version, serde_with { name = "elasticsearch", version = "=8.5.0-alpha.1" }, - - # Unused dependencies - - # we don't support Windows at the moment (#3801) - { name = "windows-sys", version = "=0.42.0" }, ] # This section is considered when running `cargo deny check sources`. diff --git a/docker/Dockerfile b/docker/Dockerfile index 180a50e52b8..46beb2bdc56 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,3 +1,5 @@ +# If you want to include a file in the Docker image, add it to .dockerignore. +# # We are using five stages: # - chef: installs cargo-chef # - planner: computes the recipe file @@ -6,6 +8,15 @@ # - release: builds release binary # - runtime: is our runtime environment # +# We first set default values for build arguments used across the stages. +# Each stage must define the build arguments (ARGs) it uses. 
+# +# Build zebrad with these features +# Keep these in sync with: +# https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/build-docker-image.yml#L37 +ARG FEATURES="default-release-binaries" +ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" + # This stage implements cargo-chef for docker layer caching FROM rust:bullseye as chef RUN cargo install cargo-chef --locked @@ -27,7 +38,7 @@ FROM chef AS deps SHELL ["/bin/bash", "-xo", "pipefail", "-c"] COPY --from=planner /opt/zebrad/recipe.json recipe.json -# Install zebra build deps +# Install zebra build deps and Dockerfile deps RUN apt-get -qq update && \ apt-get -qq install -y --no-install-recommends \ llvm \ @@ -35,6 +46,7 @@ RUN apt-get -qq update && \ clang \ ca-certificates \ protobuf-compiler \ + rsync \ ; \ rm -rf /var/lib/apt/lists/* /tmp/* @@ -55,43 +67,24 @@ RUN if [ "$(uname -m)" != "aarch64" ]; then \ && \ rm -rf /var/lib/apt/lists/* /tmp/* -# Build arguments and variables set to change how tests are run, tracelog levels, -# and Network to be used (Mainnet or Testnet) +# Build arguments and variables set for tracelog levels and debug information # # We set defaults to all variables. ARG RUST_BACKTRACE -ENV RUST_BACKTRACE ${RUST_BACKTRACE:-0} +ENV RUST_BACKTRACE=${RUST_BACKTRACE:-1} ARG RUST_LIB_BACKTRACE -ENV RUST_LIB_BACKTRACE ${RUST_LIB_BACKTRACE:-0} +ENV RUST_LIB_BACKTRACE=${RUST_LIB_BACKTRACE:-1} ARG COLORBT_SHOW_HIDDEN -ENV COLORBT_SHOW_HIDDEN ${COLORBT_SHOW_HIDDEN:-0} - -ARG RUST_LOG -ENV RUST_LOG ${RUST_LOG:-info} - -# Skip IPv6 tests by default, as some CI environment don't have IPv6 available -ARG ZEBRA_SKIP_IPV6_TESTS -ENV ZEBRA_SKIP_IPV6_TESTS ${ZEBRA_SKIP_IPV6_TESTS:-1} - -# Use default checkpoint sync and network values if none is provided -ARG CHECKPOINT_SYNC -ENV CHECKPOINT_SYNC ${CHECKPOINT_SYNC:-true} +ENV COLORBT_SHOW_HIDDEN=${COLORBT_SHOW_HIDDEN:-1} -# Build zebrad with these features -# Keep these in sync with: -# https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/build-docker-image.yml#L42 -ARG FEATURES="sentry" -ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" -# Use ENTRYPOINT_FEATURES to override the specific features used to run tests in entrypoint.sh, -# separately from the test and production image builds. 
-ENV ENTRYPOINT_FEATURES "$TEST_FEATURES $FEATURES" - -ARG NETWORK -ENV NETWORK ${NETWORK:-Mainnet} +ARG SHORT_SHA +# If this is not set, it must be the empty string, so Zebra can try an alternative git commit source: +# https://github.com/ZcashFoundation/zebra/blob/9ebd56092bcdfc1a09062e15a0574c94af37f389/zebrad/src/application.rs#L179-L182 +ENV SHORT_SHA=${SHORT_SHA:-} -ENV CARGO_HOME /opt/zebrad/.cargo/ +ENV CARGO_HOME="/opt/zebrad/.cargo/" # In this stage we build tests (without running then) # @@ -100,25 +93,51 @@ ENV CARGO_HOME /opt/zebrad/.cargo/ FROM deps AS tests # TODO: do not hardcode the user /root/ even though is a safe assumption # Pre-download Zcash Sprout, Sapling parameters and Lightwalletd binary -COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/zcash-params /root/.zcash-params /root/.zcash-params -COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/lightwalletd /opt/lightwalletd /usr/local/bin +COPY --from=us-docker.pkg.dev/zfnd-dev-zebra/zebra/zcash-params:edge /root/.zcash-params /root/.zcash-params +COPY --from=us-docker.pkg.dev/zfnd-dev-zebra/zebra/lightwalletd:edge /opt/lightwalletd /usr/local/bin + +# cargo uses timestamps for its cache, so they need to be in this order: +# unmodified source files < previous build cache < modified source files +COPY . . + +# Skip IPv6 tests by default, as some CI environment don't have IPv6 available +ARG ZEBRA_SKIP_IPV6_TESTS +ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1} + +# Use ENTRYPOINT_FEATURES to override the specific features used to run tests in entrypoint.sh, +# separately from the test and production image builds. +ARG FEATURES +ARG TEST_FEATURES +ARG ENTRYPOINT_FEATURES="${FEATURES} ${TEST_FEATURES}" # Re-hydrate the minimum project skeleton identified by `cargo chef prepare` in the planner stage, +# over the top of the original source files, # and build it to cache all possible sentry and test dependencies. # -# This is the caching Docker layer for Rust! +# This is the caching Docker layer for Rust tests! +# It creates fake empty test binaries so dependencies are built, but Zebra is not fully built. # -# TODO: is it faster to use --tests here? -RUN cargo chef cook --release --features "${TEST_FEATURES} ${FEATURES}" --workspace --recipe-path recipe.json - -COPY . . -RUN cargo test --locked --release --features "${TEST_FEATURES} ${FEATURES}" --workspace --no-run +# TODO: add --locked when cargo-chef supports it +RUN cargo chef cook --tests --release --features "${ENTRYPOINT_FEATURES}" --workspace --recipe-path recipe.json + +# Undo the source file changes made by cargo-chef. +# rsync invalidates the cargo cache for the changed files only, by updating their timestamps. +# This makes sure the fake empty binaries created by cargo-chef are rebuilt. +COPY --from=planner /opt/zebrad zebra-original +RUN rsync --recursive --checksum --itemize-changes --verbose zebra-original/ . +RUN rm -r zebra-original + +# Build Zebra test binaries, but don't run them +RUN cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace --no-run RUN cp /opt/zebrad/target/release/zebrad /usr/local/bin RUN cp /opt/zebrad/target/release/zebra-checkpoints /usr/local/bin COPY ./docker/entrypoint.sh / RUN chmod u+x /entrypoint.sh +# Entrypoint environment variables +ENV ENTRYPOINT_FEATURES=${ENTRYPOINT_FEATURES} + # By default, runs the entrypoint tests specified by the environmental variables (if any are set) ENTRYPOINT [ "/entrypoint.sh" ] @@ -128,80 +147,48 @@ ENTRYPOINT [ "/entrypoint.sh" ] # `test` stage. 
This step is a dependency for the `runtime` stage, which uses the resulting # zebrad binary from this step. FROM deps AS release -RUN cargo chef cook --release --features "${FEATURES}" --recipe-path recipe.json COPY . . -# Build zebra + +ARG FEATURES + +# This is the caching layer for Rust zebrad builds. +# It creates a fake empty zebrad binary, see above for details. +# +# TODO: add --locked when cargo-chef supports it +RUN cargo chef cook --release --features "${FEATURES}" --package zebrad --bin zebrad --recipe-path recipe.json + +# Undo the source file changes made by cargo-chef, so the fake empty zebrad binary is rebuilt. +COPY --from=planner /opt/zebrad zebra-original +RUN rsync --recursive --checksum --itemize-changes --verbose zebra-original/ . +RUN rm -r zebra-original + +# Build zebrad RUN cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad +COPY ./docker/runtime-entrypoint.sh / +RUN chmod u+x /runtime-entrypoint.sh + # This stage is only used when deploying nodes or when only the resulting zebrad binary is needed # # To save space, this step starts from scratch using debian, and only adds the resulting # binary from the `release` stage, and the Zcash Sprout & Sapling parameters from ZCash FROM debian:bullseye-slim AS runtime COPY --from=release /opt/zebrad/target/release/zebrad /usr/local/bin -COPY --from=us-docker.pkg.dev/zealous-zebra/zebra/zcash-params /root/.zcash-params /root/.zcash-params +COPY --from=release /runtime-entrypoint.sh / +COPY --from=us-docker.pkg.dev/zfnd-dev-zebra/zebra/zcash-params:edge /root/.zcash-params /root/.zcash-params RUN apt-get update && \ apt-get install -y --no-install-recommends \ ca-certificates -ARG CHECKPOINT_SYNC=true -ARG NETWORK=Mainnet -ARG RPC_PORT - -# Use a configurable dir and file for the zebrad configuration file -ARG ZEBRA_CONF_DIR=/etc/zebra -ENV ZEBRA_CONF_DIR ${ZEBRA_CONF_DIR} - -ARG ZEBRA_CONF_FILE=zebrad.toml -ENV ZEBRA_CONF_FILE ${ZEBRA_CONF_FILE} - -ARG ZEBRA_CONF_PATH=${ZEBRA_CONF_DIR}/${ZEBRA_CONF_FILE} -ENV ZEBRA_CONF_PATH ${ZEBRA_CONF_PATH} - -# Build the `zebrad.toml` before starting the container, using the arguments from build -# time, or using the default values set just above. And create the conf path and file if -# it does not exist. -# -# We disable most ports by default, so the default config is secure. -# Users have to opt-in to additional functionality by editing `zebrad.toml`. -# -# It is safe to use multiple RPC threads in Docker, because we know we are the only running -# `zebrad` or `zcashd` process in the container. 
-# -# TODO: -# - move this file creation to an entrypoint as we can use default values at runtime, -# and modify those as needed when starting the container (at runtime and not at build time) -# - make `cache_dir`, `rpc.listen_addr`, `metrics.endpoint_addr`, and `tracing.endpoint_addr` into Docker arguments -RUN mkdir -p ${ZEBRA_CONF_DIR} \ - && touch ${ZEBRA_CONF_PATH} -RUN set -ex; \ - { \ - echo "[network]"; \ - echo "network = '${NETWORK}'"; \ - echo "listen_addr = '127.0.0.1'"; \ - echo "[consensus]"; \ - echo "checkpoint_sync = ${CHECKPOINT_SYNC}"; \ - echo "[state]"; \ - echo "cache_dir = '/zebrad-cache'"; \ - echo "[rpc]"; \ - [ -n "$RPC_PORT" ] && echo "listen_addr = '127.0.0.1:${RPC_PORT}'"; \ - echo "parallel_cpu_threads = 0"; \ - echo "[metrics]"; \ - echo "#endpoint_addr = '127.0.0.1:9999'"; \ - echo "[tracing]"; \ - echo "#endpoint_addr = '127.0.0.1:3000'"; \ - } > "${ZEBRA_CONF_PATH}" - - -EXPOSE 8233 18233 $RPC_PORT - -ARG SHORT_SHA -ENV SHORT_SHA $SHORT_SHA +# Config settings for zebrad +ARG FEATURES +ENV FEATURES=${FEATURES} -ARG SENTRY_DSN -ENV SENTRY_DSN ${SENTRY_DSN} +# Expose configured ports +EXPOSE 8233 18233 -# TODO: remove the specified config file location and use the default expected by zebrad -CMD zebrad -c "${ZEBRA_CONF_PATH}" start +# Update the config file based on the Docker run variables, +# and launch zebrad with it +ENTRYPOINT [ "/runtime-entrypoint.sh" ] diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 653122caf43..9afb9b78e23 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -7,8 +7,9 @@ set -e # exit if any command in a pipeline fails set -o pipefail -# TODO: expand this section if needed (#4363) +: "${NETWORK:=Mainnet}" echo "Test variables:" +echo "NETWORK=$NETWORK" echo "ZEBRA_TEST_LIGHTWALLETD=$ZEBRA_TEST_LIGHTWALLETD" echo "Hard-coded Zebra full sync directory: /zebrad-cache" echo "ZEBRA_CACHED_STATE_DIR=$ZEBRA_CACHED_STATE_DIR" diff --git a/docker/runtime-entrypoint.sh b/docker/runtime-entrypoint.sh new file mode 100755 index 00000000000..613619795f5 --- /dev/null +++ b/docker/runtime-entrypoint.sh @@ -0,0 +1,120 @@ +#!/usr/bin/env bash + +# Show the commands we are executing +set -x +# Exit if a command fails +set -e +# Exit if any command in a pipeline fails +set -o pipefail + +# Set this to change the default cached state directory +# Path and name of the config file +: "${ZEBRA_CONF_DIR:=/etc/zebrad}" +: "${ZEBRA_CONF_FILE:=zebrad.toml}" +if [[ -n "$ZEBRA_CONF_DIR" ]] && [[ -n "$ZEBRA_CONF_FILE" ]]; then + ZEBRA_CONF_PATH="$ZEBRA_CONF_DIR/$ZEBRA_CONF_FILE" +fi + +# [network] +: "${NETWORK:=Mainnet}" +: "${ZEBRA_LISTEN_ADDR:=0.0.0.0}" +# [consensus] +: "${ZEBRA_CHECKPOINT_SYNC:=true}" +# [state] +: "${ZEBRA_CACHED_STATE_DIR:=/var/cache/zebrad-cache}" +# [metrics] +: "${METRICS_ENDPOINT_ADDR:=0.0.0.0}" +: "${METRICS_ENDPOINT_PORT:=9999}" +# [tracing] +: "${LOG_COLOR:=false}" +: "${TRACING_ENDPOINT_ADDR:=0.0.0.0}" +: "${TRACING_ENDPOINT_PORT:=3000}" +# [rpc] +: "${RPC_LISTEN_ADDR:=0.0.0.0}" +if [[ -z "${RPC_PORT}" ]]; then +if [[ " ${FEATURES} " =~ " getblocktemplate-rpcs " ]]; then +if [[ "${NETWORK}" = "Mainnet" ]]; then +: "${RPC_PORT:=8232}" +elif [[ "${NETWORK}" = "Testnet" ]]; then +: "${RPC_PORT:=18232}" +fi +fi +fi + +# Populate `zebrad.toml` before starting zebrad, using the environmental +# variables set by the Dockerfile or the user. If the user has already created a config, don't replace it. +# +# We disable most ports by default, so the default config is secure. 
+# Users have to opt-in to additional functionality by setting environment variables.
+if [[ -n "$ZEBRA_CONF_PATH" ]] && [[ ! -f "$ZEBRA_CONF_PATH" ]]; then
+
+# Create the conf path and file
+mkdir -p "$ZEBRA_CONF_DIR"
+touch "$ZEBRA_CONF_PATH"
+
+# Populate the conf file
+cat <<EOF > "$ZEBRA_CONF_PATH"
+[network]
+network = "$NETWORK"
+listen_addr = "$ZEBRA_LISTEN_ADDR"
+[state]
+cache_dir = "$ZEBRA_CACHED_STATE_DIR"
+EOF
+
+if [[ " $FEATURES " =~ " prometheus " ]]; then # spaces are important here to avoid partial matches
+cat <<EOF >> "$ZEBRA_CONF_PATH"
+[metrics]
+endpoint_addr = "${METRICS_ENDPOINT_ADDR}:${METRICS_ENDPOINT_PORT}"
+EOF
+fi
+
+if [[ -n "${RPC_PORT}" ]]; then
+cat <<EOF >> "${ZEBRA_CONF_PATH}"
+[rpc]
+listen_addr = "${RPC_LISTEN_ADDR}:${RPC_PORT}"
+EOF
+fi
+
+if [[ -n "$LOG_FILE" ]] || [[ -n "$LOG_COLOR" ]] || [[ -n "$TRACING_ENDPOINT_ADDR" ]]; then
+cat <<EOF >> "$ZEBRA_CONF_PATH"
+[tracing]
+EOF
+if [[ " $FEATURES " =~ " filter-reload " ]]; then # spaces are important here to avoid partial matches
+cat <<EOF >> "$ZEBRA_CONF_PATH"
+endpoint_addr = "${TRACING_ENDPOINT_ADDR}:${TRACING_ENDPOINT_PORT}"
+EOF
+fi
+# Set this to log to a file, if not set, logs to standard output
+if [[ -n "$LOG_FILE" ]]; then
+mkdir -p "$(dirname "$LOG_FILE")"
+cat <<EOF >> "$ZEBRA_CONF_PATH"
+log_file = "${LOG_FILE}"
+EOF
+fi
+
+# Zebra automatically detects if it is attached to a terminal, and uses colored output.
+# Set this to 'true' to force using color even if the output is not a terminal.
+# Set this to 'false' to disable using color even if the output is a terminal.
+if [[ "$LOG_COLOR" = "true" ]]; then
+cat <<EOF >> "$ZEBRA_CONF_PATH"
+force_use_color = true
+EOF
+elif [[ "$LOG_COLOR" = "false" ]]; then
+cat <<EOF >> "$ZEBRA_CONF_PATH"
+use_color = false
+EOF
+fi
+fi
+
+if [[ -n "$MINER_ADDRESS" ]]; then
+cat <<EOF >> "$ZEBRA_CONF_PATH"
+[mining]
+miner_address = "${MINER_ADDRESS}"
+EOF
+fi
+fi
+
+echo "Using zebrad.toml:"
+cat "$ZEBRA_CONF_PATH"
+
+exec zebrad -c "$ZEBRA_CONF_PATH" "$@"
diff --git a/docker/zcash-params/Dockerfile b/docker/zcash-params/Dockerfile
index 1036a2be40b..dce8153d185 100644
--- a/docker/zcash-params/Dockerfile
+++ b/docker/zcash-params/Dockerfile
@@ -1,39 +1,19 @@
-# This steps implement cargo-chef for docker layer caching
-# This image is for caching Zcash Sprout and Sapling parameters
-FROM rust:bullseye as chef
-RUN cargo install cargo-chef --locked
-WORKDIR /opt/zebrad
+# This image is for caching Zcash Sprout and Sapling parameters.
+# We don't test it automatically in CI due to download server rate-limiting.
+# To manually run it on the PR branch before merging, go to:
+# https://github.com/ZcashFoundation/zebra/actions/workflows/zcash-params.yml
+
+FROM debian:bullseye-slim AS release
+
+# Just use the precompiled zebrad binary from a recent release image.
+#
+# It doesn't matter what build or commit of Zebra we use, because it just calls into the
+# zcash_proofs download code. (Which doesn't change much.)
+# Test image zebrad binaries would also work, but it's harder to get a recent tag for them.
+#
+# Compiling the download-params example using `cargo` is another alternative:
+# `cargo run --locked --release --features default-docker --example download-params`
+COPY --from=zfnd/zebra:latest /usr/local/bin/zebrad /usr/local/bin
 
-FROM chef AS planner
-COPY . .
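
# A sketch of starting the runtime image whose entrypoint is the
# runtime-entrypoint.sh above; the script turns these variables into
# zebrad.toml settings. Image tag, ports, and values are illustrative:
docker run \
  -e NETWORK="Testnet" \
  -e RPC_PORT="18232" \
  -e LOG_COLOR="false" \
  -p 18233:18233 -p 18232:18232 \
  zfnd/zebra:latest
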
-RUN cargo chef prepare --recipe-path recipe.json - -FROM chef AS release -COPY --from=planner /opt/zebrad/recipe.json recipe.json - -# Install zebra build deps -RUN apt-get -qq update && \ - apt-get -qq install -y --no-install-recommends \ - llvm \ - libclang-dev \ - clang \ - ca-certificates \ - ; \ - rm -rf /var/lib/apt/lists/* /tmp/* - -ENV CARGO_HOME /opt/zebrad/.cargo/ -# Build dependencies - this is the caching Docker layer! -RUN cargo chef cook --release --features sentry --package zebrad --recipe-path recipe.json - -ARG RUST_BACKTRACE=0 -ENV RUST_BACKTRACE ${RUST_BACKTRACE} - -ARG RUST_LIB_BACKTRACE=0 -ENV RUST_LIB_BACKTRACE ${RUST_LIB_BACKTRACE} - -ARG COLORBT_SHOW_HIDDEN=0 -ENV COLORBT_SHOW_HIDDEN ${COLORBT_SHOW_HIDDEN} - -COPY . . # Pre-download Zcash Sprout and Sapling parameters -RUN cargo run --locked --release --features sentry --package zebrad --bin zebrad download +RUN zebrad download diff --git a/firebase.json b/firebase.json index d69ad9a177b..7dd48adfe68 100644 --- a/firebase.json +++ b/firebase.json @@ -1,23 +1,63 @@ { - "hosting": { - "public": "target/doc", - "ignore": [ - "firebase.json", - "**/.*", - "**/node_modules/**" - ], - "rewrites": [ - { - "source": "**", - "destination": "/index.html" - } - ], - "redirects": [ - { - "source": "/", - "destination": "/zebrad", - "type": 301 - } - ] - } + "hosting": [ + { + "public": "target/external", + "target": "docs-external", + "ignore": [ + "firebase.json", + "**/.*", + "**/node_modules/**" + ], + "rewrites": [ + { + "source": "**", + "destination": "/index.html" + } + ], + "redirects": [ + { + "source": "/", + "destination": "/zebrad", + "type": 301 + } + ] + }, + { + "public": "target/internal", + "target": "docs-internal", + "ignore": [ + "firebase.json", + "**/.*", + "**/node_modules/**" + ], + "rewrites": [ + { + "source": "**", + "destination": "/index.html" + } + ], + "redirects": [ + { + "source": "/", + "destination": "/zebrad", + "type": 301 + } + ] + }, + { + "public": "target/book", + "target": "docs-book", + "ignore": [ + "firebase.json", + "**/.*", + "**/node_modules/**" + ], + "rewrites": [ + { + "source": "**", + "destination": "/index.html" + } + ] + } + ] } diff --git a/release.toml b/release.toml new file mode 100644 index 00000000000..0cbcdd51772 --- /dev/null +++ b/release.toml @@ -0,0 +1,20 @@ +# Only allow releases from the main branch +allow-branch = [ 'main' ] + +# TODO: +# configure all zebra-* crates with a shared version, +# and all tower-* crates with a different one: +# https://github.com/crate-ci/cargo-release/blob/master/docs/reference.md#config-fields +#shared-version = "TODO named groups" + +# Verify releases with release features +# +# TODO: add this feature to all crates +#enable-features = [ 'default-release-binaries' ] + +# Don't do a git push or tag +push = false +tag = false + +# Owners for new crates +owners = [ 'dconnolly', 'teor2345', 'zcashfoundation/owners' ] diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml new file mode 100644 index 00000000000..1df5a5e52e3 --- /dev/null +++ b/tower-batch-control/Cargo.toml @@ -0,0 +1,50 @@ +[package] +name = "tower-batch-control" +version = "0.2.41-beta.4" +authors = ["Zcash Foundation ", "Tower Maintainers "] +description = "Tower middleware for batch request processing" +# # Legal +# +# This licence is deliberately different to the rest of Zebra. 
+# +# This code was modified from a 2019 version of: +# https://github.com/tower-rs/tower/tree/master/tower/src/buffer +license = "MIT" +repository = "https://github.com/ZcashFoundation/zebra" +edition = "2021" + +# TODO: decide if we want to use the Zebra readme and home page +#readme = "../README.md" +#homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["tower", "batch"] +# Must be one of +categories = ["algorithms", "asynchronous"] + +[dependencies] +futures = "0.3.28" +futures-core = "0.3.28" +pin-project = "1.1.3" +rayon = "1.7.0" +tokio = { version = "1.29.1", features = ["time", "sync", "tracing", "macros"] } +tokio-util = "0.7.8" +tower = { version = "0.4.13", features = ["util", "buffer"] } +tracing = "0.1.37" +tracing-futures = "0.2.5" + +[dev-dependencies] +color-eyre = "0.6.2" +# This is a transitive dependency via color-eyre. +# Enable a feature that makes tinyvec compile much faster. +tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } + +ed25519-zebra = "4.0.1" +rand = "0.8.5" + +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } +tokio-test = "0.4.2" +tower-fallback = { path = "../tower-fallback/" } +tower-test = "0.4.0" + +zebra-consensus = { path = "../zebra-consensus/" } +zebra-test = { path = "../zebra-test/" } diff --git a/tower-batch-control/LICENSE b/tower-batch-control/LICENSE new file mode 100644 index 00000000000..9862976a6ce --- /dev/null +++ b/tower-batch-control/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2019-2023 Zcash Foundation +Copyright (c) 2019 Tower Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/tower-batch/src/error.rs b/tower-batch-control/src/error.rs similarity index 100% rename from tower-batch/src/error.rs rename to tower-batch-control/src/error.rs diff --git a/tower-batch/src/future.rs b/tower-batch-control/src/future.rs similarity index 100% rename from tower-batch/src/future.rs rename to tower-batch-control/src/future.rs diff --git a/tower-batch/src/layer.rs b/tower-batch-control/src/layer.rs similarity index 100% rename from tower-batch/src/layer.rs rename to tower-batch-control/src/layer.rs diff --git a/tower-batch/src/lib.rs b/tower-batch-control/src/lib.rs similarity index 95% rename from tower-batch/src/lib.rs rename to tower-batch-control/src/lib.rs index 855b1a962b0..2628a09562e 100644 --- a/tower-batch/src/lib.rs +++ b/tower-batch-control/src/lib.rs @@ -84,6 +84,14 @@ //! a `Service`. The wrapped service does not need to implement any batch //! 
control logic, as it will receive explicit [`Flush`](BatchControl::Flush) //! requests from the wrapper. +//! +//! ## Implementation History +//! +//! The `tower-batch-control` code was modified from a 2019 version of: +//! +//! +//! A modified fork of this crate is available on crates.io as `tower-batch`. +//! It is focused on batching disk writes. pub mod error; pub mod future; diff --git a/tower-batch/src/message.rs b/tower-batch-control/src/message.rs similarity index 100% rename from tower-batch/src/message.rs rename to tower-batch-control/src/message.rs diff --git a/tower-batch/src/service.rs b/tower-batch-control/src/service.rs similarity index 100% rename from tower-batch/src/service.rs rename to tower-batch-control/src/service.rs diff --git a/tower-batch/src/worker.rs b/tower-batch-control/src/worker.rs similarity index 100% rename from tower-batch/src/worker.rs rename to tower-batch-control/src/worker.rs diff --git a/tower-batch/tests/ed25519.rs b/tower-batch-control/tests/ed25519.rs similarity index 99% rename from tower-batch/tests/ed25519.rs rename to tower-batch-control/tests/ed25519.rs index c45e196d2f1..773b1e3e017 100644 --- a/tower-batch/tests/ed25519.rs +++ b/tower-batch-control/tests/ed25519.rs @@ -7,7 +7,7 @@ use ed25519_zebra::*; use futures::stream::{FuturesOrdered, StreamExt}; use rand::thread_rng; use tower::{Service, ServiceExt}; -use tower_batch::Batch; +use tower_batch_control::Batch; use tower_fallback::Fallback; // ============ service impl ============ diff --git a/tower-batch/tests/worker.rs b/tower-batch-control/tests/worker.rs similarity index 98% rename from tower-batch/tests/worker.rs rename to tower-batch-control/tests/worker.rs index 640af1fa2a9..1e7a18b79f9 100644 --- a/tower-batch/tests/worker.rs +++ b/tower-batch-control/tests/worker.rs @@ -4,7 +4,7 @@ use std::time::Duration; use tokio_test::{assert_pending, assert_ready, assert_ready_err, task}; use tower::{Service, ServiceExt}; -use tower_batch::{error, Batch}; +use tower_batch_control::{error, Batch}; use tower_test::mock; #[tokio::test] diff --git a/tower-batch/Cargo.toml b/tower-batch/Cargo.toml deleted file mode 100644 index 2c874d54ca6..00000000000 --- a/tower-batch/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "tower-batch" -version = "0.2.39" -authors = ["Zcash Foundation "] -license = "MIT" -edition = "2021" - -[dependencies] -futures = "0.3.28" -futures-core = "0.3.28" -pin-project = "1.1.0" -rayon = "1.7.0" -tokio = { version = "1.28.0", features = ["time", "sync", "tracing", "macros"] } -tokio-util = "0.7.8" -tower = { version = "0.4.13", features = ["util", "buffer"] } -tracing = "0.1.37" -tracing-futures = "0.2.5" - -[dev-dependencies] -color-eyre = "0.6.2" -# This is a transitive dependency via color-eyre. -# Enable a feature that makes tinyvec compile much faster. 
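
// A minimal sketch of the wrapped-service shape described by the
// `tower-batch-control` docs above: the middleware turns each request into
// `BatchControl::Item`, and sends `BatchControl::Flush` when a batch is full
// or its timer expires. `Request` and the batch handling are illustrative,
// not Zebra's implementation.
use std::task::{Context, Poll};

use futures::future::{ready, Ready};
use tower_batch_control::BatchControl;

type Request = u64;

#[derive(Default)]
struct BatchVerifier {
    pending: Vec<Request>,
}

impl tower::Service<BatchControl<Request>> for BatchVerifier {
    type Response = ();
    type Error = &'static str;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: BatchControl<Request>) -> Self::Future {
        match req {
            // Queue items until the middleware decides to flush.
            BatchControl::Item(item) => {
                self.pending.push(item);
                ready(Ok(()))
            }
            // Process the whole queued batch in one go.
            BatchControl::Flush => {
                let _batch = std::mem::take(&mut self.pending);
                // ... batched verification of `_batch` would happen here ...
                ready(Ok(()))
            }
        }
    }
}
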
-tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } - -ed25519-zebra = "3.1.0" -rand = { version = "0.8.5", package = "rand" } - -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } -tokio-test = "0.4.2" -tower-fallback = { path = "../tower-fallback/" } -tower-test = "0.4.0" - -zebra-consensus = { path = "../zebra-consensus/" } -zebra-test = { path = "../zebra-test/" } diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index e13cee274e9..6cd7ed219e5 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,17 +1,27 @@ [package] name = "tower-fallback" -version = "0.2.39" +version = "0.2.41-beta.4" authors = ["Zcash Foundation "] -license = "MIT" +description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." +license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" +# TODO: decide if we want to use the Zebra readme and home page +#readme = "../README.md" +#homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["tower", "batch"] +# Must be one of +categories = ["algorithms", "asynchronous"] + [dependencies] -pin-project = "1.1.0" +pin-project = "1.1.3" tower = "0.4.13" futures-core = "0.3.28" tracing = "0.1.37" [dev-dependencies] -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 5d448dda815..5c2020bb64a 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,11 +1,18 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] +description = "Core Zcash data structures" license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["asynchronous", "cryptography::cryptocurrencies", "encoding"] [features] default = [] @@ -17,11 +24,19 @@ json-conversion = [ "serde_json", ] +# Async error handling convenience traits +async-error = [ + "tokio", +] + # Experimental mining RPC support getblocktemplate-rpcs = [ "zcash_address", ] +# Experimental elasticsearch support +elasticsearch = [] + # Test-only features proptest-impl = [ @@ -29,7 +44,7 @@ proptest-impl = [ "proptest-derive", "rand", "rand_chacha", - "tokio", + "tokio/tracing", "zebra-test", ] @@ -39,15 +54,16 @@ bench = ["zebra-test"] # Cryptography bitvec = "1.0.1" -bitflags = "2.2.1" +bitflags = "2.3.3" bitflags-serde-legacy = "0.1.1" blake2b_simd = "1.0.1" blake2s_simd = "1.0.1" -bs58 = { version = "0.4.0", features = ["check"] } +bridgetree = "0.3.0" +bs58 = { version = "0.5.0", features = ["check"] } byteorder = "1.4.3" equihash = "0.2.0" group = "0.13.0" -incrementalmerkletree = "0.3.1" +incrementalmerkletree = "0.4.0" jubjub = "0.10.0" lazy_static = "1.4.0" num-integer = "0.1.45" @@ -55,65 +71,66 @@ primitive-types = "0.11.1" rand_core = "0.6.4" ripemd = "0.1.3" # Matches version used by hdwallet -secp256k1 = { version = "0.21.3", features = ["serde"] } -sha2 = { version = "0.9.9", 
features = ["compress"] } +secp256k1 = { version = "0.26.0", features = ["serde"] } +sha2 = { version = "0.10.7", features = ["compress"] } uint = "0.9.5" -x25519-dalek = { version = "2.0.0-pre.1", features = ["serde"] } +x25519-dalek = { version = "2.0.0-rc.3", features = ["serde"] } # ECC deps halo2 = { package = "halo2_proofs", version = "0.3.0" } -orchard = "0.4.0" +orchard = "0.5.0" zcash_encoding = "0.2.0" zcash_history = "0.3.0" -zcash_note_encryption = "0.3.0" -zcash_primitives = { version = "0.11.0", features = ["transparent-inputs"] } +zcash_note_encryption = "0.4.0" +zcash_primitives = { version = "0.12.0", features = ["transparent-inputs"] } # Time -chrono = { version = "0.4.24", default-features = false, features = ["clock", "std", "serde"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock", "std", "serde"] } humantime = "2.1.0" # Error Handling & Formatting displaydoc = "0.2.4" static_assertions = "1.1.0" -thiserror = "1.0.40" +thiserror = "1.0.44" tracing = "0.1.37" # Serialization hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.163", features = ["serde_derive", "rc"] } -serde_with = "3.0.0" +serde = { version = "1.0.179", features = ["serde_derive", "rc"] } +serde_with = "3.2.0" serde-big-array = "0.5.1" # Processing futures = "0.3.28" -itertools = "0.10.5" +itertools = "0.11.0" rayon = "1.7.0" # ZF deps -ed25519-zebra = "3.1.0" +ed25519-zebra = "4.0.1" redjubjub = "0.7.0" -reddsa = "0.5.0" +reddsa = "0.5.1" # Production feature json-conversion -serde_json = { version = "1.0.95", optional = true } +serde_json = { version = "1.0.104", optional = true } + +# Production feature async-error and testing feature proptest-impl +tokio = { version = "1.29.1", optional = true } # Experimental feature getblocktemplate-rpcs -zcash_address = { version = "0.2.1", optional = true } +zcash_address = { version = "0.3.0", optional = true } # Optional testing dependencies -proptest = { version = "1.1.0", optional = true } +proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } -rand = { version = "0.8.5", optional = true, package = "rand" } +rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } -tokio = { version = "1.28.0", features = ["tracing"], optional = true } - -zebra-test = { path = "../zebra-test/", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.28", optional = true } [dev-dependencies] # Benchmarks -criterion = { version = "0.4.0", features = ["html_reports"] } +criterion = { version = "0.5.1", features = ["html_reports"] } # Error Handling & Formatting color-eyre = "0.6.2" @@ -124,13 +141,13 @@ spandoc = "0.2.2" tracing = "0.1.37" # Make the optional testing dependencies required -proptest = "1.1.0" +proptest = "1.2.0" proptest-derive = "0.3.0" -rand = { version = "0.8.5", package = "rand" } +rand = "0.8.5" rand_chacha = "0.3.1" -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-chain/src/block.rs b/zebra-chain/src/block.rs index 3ae0988194a..d472e930c7f 100644 --- a/zebra-chain/src/block.rs +++ b/zebra-chain/src/block.rs @@ -43,7 +43,10 @@ pub use arbitrary::LedgerState; /// A Zcash block, containing a header and a list of transactions. 
 #[derive(Clone, Debug, Eq, PartialEq)]
-#[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))]
+#[cfg_attr(
+    any(test, feature = "proptest-impl", feature = "elasticsearch"),
+    derive(Serialize)
+)]
 pub struct Block {
     /// The block header, containing block metadata.
     pub header: Arc<Header>,
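
// With the `elasticsearch` feature named above, `Block` derives
// `serde::Serialize`; a minimal sketch of what that enables (function name
// illustrative, and assumes a serde_json dependency):
fn block_to_json(block: &zebra_chain::block::Block) -> serde_json::Result<String> {
    serde_json::to_string(block)
}
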
diff --git a/zebra-chain/src/block/arbitrary.rs b/zebra-chain/src/block/arbitrary.rs
index 9d6eb1867fb..36734c86b24 100644
--- a/zebra-chain/src/block/arbitrary.rs
+++ b/zebra-chain/src/block/arbitrary.rs
@@ -349,7 +349,9 @@ impl Arbitrary for Block {
     fn arbitrary_with(ledger_state: Self::Parameters) -> Self::Strategy {
         let transactions_strategy =
-            (1..MAX_ARBITRARY_ITEMS).prop_flat_map(move |transaction_count| {
+            // Generate a random number of transactions. A coinbase tx is always generated, so if
+            // `transaction_count` is zero, the block will contain only the coinbase tx.
+            (0..MAX_ARBITRARY_ITEMS).prop_flat_map(move |transaction_count| {
                 Transaction::vec_strategy(ledger_state, transaction_count)
             });
 
diff --git a/zebra-chain/src/block/height.rs b/zebra-chain/src/block/height.rs
index 70bc17e818c..46569221c9c 100644
--- a/zebra-chain/src/block/height.rs
+++ b/zebra-chain/src/block/height.rs
@@ -65,6 +65,29 @@ impl Height {
     /// previous to Nu5 and in non-coinbase transactions from Nu5 activation
     /// height and above.
     pub const MAX_EXPIRY_HEIGHT: Height = Height(499_999_999);
+
+    /// Returns the next [`Height`].
+    ///
+    /// # Panics
+    ///
+    /// - If the current height is at its maximum.
+    pub fn next(self) -> Self {
+        (self + 1).expect("Height should not be at its maximum.")
+    }
+
+    /// Returns the previous [`Height`].
+    ///
+    /// # Panics
+    ///
+    /// - If the current height is at its minimum.
+    pub fn previous(self) -> Self {
+        (self - 1).expect("Height should not be at its minimum.")
+    }
+
+    /// Returns `true` if the [`Height`] is at its minimum.
+    pub fn is_min(self) -> bool {
+        self == Self::MIN
+    }
 }
 
 /// A difference between two [`Height`]s, possibly negative.
@@ -82,7 +105,11 @@ impl TryFrom<u32> for Height {
     /// Checks that the `height` is within the valid [`Height`] range.
     fn try_from(height: u32) -> Result<Self, Self::Error> {
         // Check the bounds.
-        if Height::MIN.0 <= height && height <= Height::MAX.0 {
+        //
+        // Clippy warns that `height >= Height::MIN.0` is always true.
+        assert_eq!(Height::MIN.0, 0);
+
+        if height <= Height::MAX.0 {
             Ok(Height(height))
         } else {
             Err("heights must be less than or equal to Height::MAX")
@@ -201,9 +228,6 @@ fn operator_tests() {
     assert_eq!(None, Height(i32::MAX as u32) + 1);
     assert_eq!(None, Height(u32::MAX) + 0);
 
-    assert_eq!(Some(Height(2)), Height(1) + 1);
-    assert_eq!(None, Height::MAX + 1);
-
     // Adding negative numbers
     assert_eq!(Some(Height(1)), Height(2) + -1);
     assert_eq!(Some(Height(0)), Height(1) + -1);
diff --git a/zebra-chain/src/block/merkle.rs b/zebra-chain/src/block/merkle.rs
index 4e6dd98919a..42762bbe6ca 100644
--- a/zebra-chain/src/block/merkle.rs
+++ b/zebra-chain/src/block/merkle.rs
@@ -9,7 +9,7 @@ use crate::{
     transaction::{self, Transaction, UnminedTx, UnminedTxId, VerifiedUnminedTx},
 };
 
-#[cfg(any(any(test, feature = "proptest-impl"), feature = "proptest-impl"))]
+#[cfg(any(test, feature = "proptest-impl"))]
 use proptest_derive::Arbitrary;
 
 /// The root of the Bitcoin-inherited transaction Merkle tree, binding the
@@ -486,7 +486,7 @@ mod tests {
         // Compute the AuthDataRoot with a single [0xFF; 32] digest.
         // Since ZIP-244 specifies that this value must be used as the auth digest of
         // pre-V5 transactions, the roots must match.
-        let expect_auth_root = vec![AuthDigest([0xFF; 32])]
+        let expect_auth_root = [AuthDigest([0xFF; 32])]
             .iter()
             .copied()
             .collect::<AuthDataRoot>();
 
diff --git a/zebra-chain/src/diagnostic.rs b/zebra-chain/src/diagnostic.rs
index 560838fb32b..1a453143aff 100644
--- a/zebra-chain/src/diagnostic.rs
+++ b/zebra-chain/src/diagnostic.rs
@@ -1,6 +1,15 @@
-//! Tracing the execution time of functions.
-//!
-//! TODO: also trace polling time for futures, using a `Future` wrapper
+//! Diagnostic types and functions for Zebra:
+//! - code performance
+//! - task handling
+//! - errors and panics
+
+pub mod task;
+
+// Tracing the execution time of functions.
+//
+// TODO:
+// - move this to a `timing` submodule
+// - also trace polling time for futures, using a `Future` wrapper
 
 use std::time::{Duration, Instant};
 
diff --git a/zebra-chain/src/diagnostic/task.rs b/zebra-chain/src/diagnostic/task.rs
new file mode 100644
index 00000000000..2d43f695537
--- /dev/null
+++ b/zebra-chain/src/diagnostic/task.rs
@@ -0,0 +1,47 @@
+//! Diagnostic types and functions for Zebra tasks:
+//! - OS thread handling
+//! - async future task handling
+//! - errors and panics
+
+#[cfg(feature = "async-error")]
+pub mod future;
+
+pub mod thread;
+
+/// A trait that checks a task's return value for panics.
+pub trait CheckForPanics {
+    /// The output type, after removing panics from `Self`.
+    type Output;
+
+    /// Check if `self` contains a panic payload or an unexpected termination, and panic if it does.
+    /// Otherwise, return the non-panic part of `self`.
+    ///
+    /// # Panics
+    ///
+    /// If `self` contains a panic payload or an unexpected termination.
+    #[track_caller]
+    fn check_for_panics(self) -> Self::Output;
+}
+
+/// A trait that waits for a task to finish, then handles panics and cancellations.
+pub trait WaitForPanics {
+    /// The underlying task output, after removing panics and unwrapping termination results.
+    type Output;
+
+    /// Waits for `self` to finish, then checks if its output is:
+    /// - a panic payload: resume that panic,
+    /// - an unexpected termination: panic with that error,
+    /// - an expected termination: hang waiting for shutdown.
+    ///
+    /// Otherwise, returns the task return value of `self`.
+    ///
+    /// # Panics
+    ///
+    /// If `self` contains a panic payload or an unexpected termination.
+    ///
+    /// # Hangs
+    ///
+    /// If `self` contains an expected termination, and we're shutting down anyway.
+    #[track_caller]
+    fn wait_for_panics(self) -> Self::Output;
+}
diff --git a/zebra-chain/src/diagnostic/task/future.rs b/zebra-chain/src/diagnostic/task/future.rs
new file mode 100644
index 00000000000..431b13ed94f
--- /dev/null
+++ b/zebra-chain/src/diagnostic/task/future.rs
@@ -0,0 +1,93 @@
+//! Diagnostic types and functions for Zebra async future tasks:
+//! - task handles
+//! - errors and panics
+
+use std::{future, panic};
+
+use futures::future::{BoxFuture, FutureExt};
+use tokio::task::{JoinError, JoinHandle};
+
+use crate::shutdown::is_shutting_down;
+
+use super::{CheckForPanics, WaitForPanics};
+
+/// This is the return type of the [`JoinHandle`] future.
+impl<T> CheckForPanics for Result<T, JoinError> {
+    /// The [`JoinHandle`]'s task output, after resuming any panics,
+    /// and ignoring task cancellations on shutdown.
+    type Output = Result<T, JoinError>;
+
+    /// Returns the task result if the task finished normally.
+    /// Otherwise, resumes any panics, logs unexpected errors, and ignores any expected errors.
+    ///
+    /// If the task finished normally, returns `Ok(T)`.
+    /// If the task was cancelled during shutdown, returns `Err(JoinError)`.
+    #[track_caller]
+    fn check_for_panics(self) -> Self::Output {
+        match self {
+            Ok(task_output) => Ok(task_output),
+            Err(join_error) => Err(join_error.check_for_panics()),
+        }
+    }
+}
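
// A minimal sketch of using `WaitForPanics` on a tokio task, per the trait
// docs above; illustrative only, and assumes zebra-chain's `async-error`
// feature is enabled so this `future` module is compiled:
//
//     use zebra_chain::diagnostic::task::WaitForPanics;
//
//     #[tokio::main]
//     async fn main() {
//         let handle = tokio::spawn(async { 21 * 2 });
//         // Resumes the task's panic if it panicked; otherwise returns its output.
//         let output = handle.wait_for_panics().await;
//         assert_eq!(output, 42);
//     }
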
+
+impl CheckForPanics for JoinError {
+    /// The [`JoinError`] after resuming any panics, and logging any unexpected task cancellations.
+    type Output = JoinError;
+
+    /// Resume any panics and panic on unexpected task cancellations.
+    /// Always returns [`JoinError::Cancelled`](JoinError::is_cancelled).
+    #[track_caller]
+    fn check_for_panics(self) -> Self::Output {
+        match self.try_into_panic() {
+            Ok(panic_payload) => panic::resume_unwind(panic_payload),
+
+            // We could ignore this error, but then we'd have to change the return type.
+            Err(task_cancelled) if is_shutting_down() => {
+                debug!(
+                    ?task_cancelled,
+                    "ignoring cancelled task because Zebra is shutting down"
+                );
+
+                task_cancelled
+            }
+
+            Err(task_cancelled) => {
+                panic!("task cancelled during normal Zebra operation: {task_cancelled:?}");
+            }
+        }
+    }
+}
+
+impl<T> WaitForPanics for JoinHandle<T>
+where
+    T: Send + 'static,
+{
+    type Output = BoxFuture<'static, T>;
+
+    /// Returns a future which waits for `self` to finish, then checks if its output is:
+    /// - a panic payload: resume that panic,
+    /// - an unexpected termination: panic with that error,
+    /// - an expected termination: hang waiting for shutdown.
+    ///
+    /// Otherwise, returns the task return value of `self`.
+    ///
+    /// # Panics
+    ///
+    /// If `self` contains a panic payload, or [`JoinHandle::abort()`] has been called on `self`.
+    ///
+    /// # Hangs
+    ///
+    /// If `self` contains an expected termination, and we're shutting down anyway.
+    /// Futures hang by returning `Pending` and not setting a waker, so this uses minimal resources.
+    #[track_caller]
+    fn wait_for_panics(self) -> Self::Output {
+        async move {
+            match self.await.check_for_panics() {
+                Ok(task_output) => task_output,
+                Err(_expected_cancel_error) => future::pending().await,
+            }
+        }
+        .boxed()
+    }
+}
diff --git a/zebra-chain/src/diagnostic/task/thread.rs b/zebra-chain/src/diagnostic/task/thread.rs
new file mode 100644
index 00000000000..84df3fac4aa
--- /dev/null
+++ b/zebra-chain/src/diagnostic/task/thread.rs
@@ -0,0 +1,108 @@
+//! Diagnostic types and functions for Zebra OS thread tasks:
+//! - task handles
+//! - errors and panics
+
+use std::{
+    panic,
+    sync::Arc,
+    thread::{self, JoinHandle},
+};
+
+use super::{CheckForPanics, WaitForPanics};
+
+impl<T> CheckForPanics for thread::Result<T> {
+    type Output = T;
+
+    /// Panics if the thread panicked.
+    ///
+    /// Threads can't be cancelled except by using a panic, so there are no thread errors here.
+    #[track_caller]
+    fn check_for_panics(self) -> Self::Output {
+        match self {
+            // The value returned by the thread when it finished.
+            Ok(thread_output) => thread_output,
+
+            // A thread error is always a panic.
+            Err(panic_payload) => panic::resume_unwind(panic_payload),
+        }
+    }
+}
+
+impl<T> WaitForPanics for JoinHandle<T> {
+    type Output = T;
+
+    /// Waits for the thread to finish, then panics if the thread panicked.
+    #[track_caller]
+    fn wait_for_panics(self) -> Self::Output {
+        self.join().check_for_panics()
+    }
+}
+
+impl<T> WaitForPanics for Arc<JoinHandle<T>> {
+    type Output = Option<T>;
+
+    /// If this is the final `Arc`, waits for the thread to finish, then panics if the thread
+    /// panicked. Otherwise, returns the thread's return value.
+    ///
+    /// If this is not the final `Arc`, drops the handle and immediately returns `None`.
+    #[track_caller]
+    fn wait_for_panics(self) -> Self::Output {
+        // If we are the last Arc with a reference to this handle,
+        // we can wait for it and propagate any panics.
+        //
+        // We use into_inner() because it guarantees that exactly one of the tasks gets the
+        // JoinHandle. try_unwrap() lets us keep the JoinHandle, but it can also miss panics.
+        //
+        // This is more readable as an expanded statement.
+        #[allow(clippy::manual_map)]
+        if let Some(handle) = Arc::into_inner(self) {
+            Some(handle.wait_for_panics())
+        } else {
+            None
+        }
+    }
+}
+
+impl<T> CheckForPanics for &mut Option<Arc<JoinHandle<T>>> {
+    type Output = Option<T>;
+
+    /// If this is the final `Arc`, checks if the thread has finished, then panics if the thread
+    /// panicked. Otherwise, returns the thread's return value.
+    ///
+    /// If the thread has not finished, or this is not the final `Arc`, returns `None`.
+    #[track_caller]
+    fn check_for_panics(self) -> Self::Output {
+        let handle = self.take()?;
+
+        if handle.is_finished() {
+            // This is the same as calling `self.wait_for_panics()`, but we can't do that,
+            // because we've taken `self`.
+            #[allow(clippy::manual_map)]
+            return handle.wait_for_panics();
+        }
+
+        *self = Some(handle);
+
+        None
+    }
+}
+
+impl<T> WaitForPanics for &mut Option<Arc<JoinHandle<T>>> {
+    type Output = Option<T>;
+
+    /// If this is the final `Arc`, waits for the thread to finish, then panics if the thread
+    /// panicked. Otherwise, returns the thread's return value.
+    ///
+    /// If this is not the final `Arc`, drops the handle and returns `None`.
+    #[track_caller]
+    fn wait_for_panics(self) -> Self::Output {
+        // This is more readable as an expanded statement.
+        #[allow(clippy::manual_map)]
+        if let Some(output) = self.take()?.wait_for_panics() {
+            Some(output)
+        } else {
+            // Some other task has a reference, so we should give up ours to let them use it.
+            None
+        }
+    }
+}
diff --git a/zebra-chain/src/fmt.rs b/zebra-chain/src/fmt.rs
index 800663147b6..98923446c99 100644
--- a/zebra-chain/src/fmt.rs
+++ b/zebra-chain/src/fmt.rs
@@ -162,7 +162,7 @@ where
 }
 
 /// Wrapper to override `Debug`, redirecting it to hex-encode the type.
-/// The type must be hex-encodable.
+/// The type must implement `AsRef<[u8]>`.
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
 #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))]
 #[serde(transparent)]
diff --git a/zebra-chain/src/orchard/note/ciphertexts.rs b/zebra-chain/src/orchard/note/ciphertexts.rs
index 72cfeb98a13..8f857cf1444 100644
--- a/zebra-chain/src/orchard/note/ciphertexts.rs
+++ b/zebra-chain/src/orchard/note/ciphertexts.rs
@@ -1,3 +1,5 @@
+//! Encrypted parts of Orchard notes.
+
 use std::{fmt, io};
 
 use serde_big_array::BigArray;
@@ -17,9 +19,7 @@ impl Copy for EncryptedNote {}
 
 impl Clone for EncryptedNote {
     fn clone(&self) -> Self {
-        let mut bytes = [0; 580];
-        bytes[..].copy_from_slice(&self.0[..]);
-        Self(bytes)
+        *self
     }
 }
 
@@ -86,9 +86,7 @@ impl Copy for WrappedNoteKey {}
 
 impl Clone for WrappedNoteKey {
     fn clone(&self) -> Self {
-        let mut bytes = [0; 80];
-        bytes[..].copy_from_slice(&self.0[..]);
-        Self(bytes)
+        *self
     }
 }
diff --git a/zebra-chain/src/orchard/shielded_data.rs b/zebra-chain/src/orchard/shielded_data.rs
index dc55d19a8f7..3a034c05f0f 100644
--- a/zebra-chain/src/orchard/shielded_data.rs
+++ b/zebra-chain/src/orchard/shielded_data.rs
@@ -269,9 +269,6 @@ impl ZcashDeserialize for Flags {
         // Consensus rule: "In a version 5 transaction,
         // the reserved bits 2..7 of the flagsOrchard field MUST be zero."
// https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus - // - // Clippy 1.64 is wrong here, this lazy evaluation is necessary, constructors are functions. This is fixed in 1.66. - #[allow(clippy::unnecessary_lazy_evaluations)] Flags::from_bits(reader.read_u8()?) .ok_or_else(|| SerializationError::Parse("invalid reserved orchard flags")) } diff --git a/zebra-chain/src/orchard/sinsemilla.rs b/zebra-chain/src/orchard/sinsemilla.rs index 060fbfb39e8..d7d05813a5c 100644 --- a/zebra-chain/src/orchard/sinsemilla.rs +++ b/zebra-chain/src/orchard/sinsemilla.rs @@ -159,10 +159,6 @@ pub fn sinsemilla_hash(D: &[u8], M: &BitVec) -> Option { extract_p_bottom(sinsemilla_hash_to_point(D, M)) } -// TODO: test the above correctness and compatibility with the zcash-hackworks test vectors -// https://github.com/ZcashFoundation/zebra/issues/2079 -// https://github.com/zcash-hackworks/zcash-test-vectors/pulls - #[cfg(test)] mod tests { diff --git a/zebra-chain/src/orchard/tree.rs b/zebra-chain/src/orchard/tree.rs index 6af9680c5f2..9862bd8f7fb 100644 --- a/zebra-chain/src/orchard/tree.rs +++ b/zebra-chain/src/orchard/tree.rs @@ -14,16 +14,16 @@ use std::{ fmt, hash::{Hash, Hasher}, io, - ops::Deref, sync::Arc, }; use bitvec::prelude::*; +use bridgetree; use halo2::pasta::{group::ff::PrimeField, pallas}; -use incrementalmerkletree::{bridgetree, Frontier}; +use incrementalmerkletree::Hashable; use lazy_static::lazy_static; use thiserror::Error; -use zcash_primitives::merkle_tree::{self, CommitmentTree}; +use zcash_primitives::merkle_tree::{write_commitment_tree, HashSer}; use super::sinsemilla::*; @@ -31,6 +31,9 @@ use crate::serialization::{ serde_helpers, ReadZcashExt, SerializationError, ZcashDeserialize, ZcashSerialize, }; +pub mod legacy; +use legacy::LegacyNoteCommitmentTree; + /// The type that is used to update the note commitment tree. /// /// Unfortunately, this is not the same as `orchard::NoteCommitment`. @@ -165,18 +168,18 @@ impl ZcashDeserialize for Root { /// A node of the Orchard Incremental Note Commitment Tree. #[derive(Copy, Clone, Debug, Eq, PartialEq)] -struct Node(pallas::Base); +pub struct Node(pallas::Base); /// Required to convert [`NoteCommitmentTree`] into [`SerializedTree`]. /// /// Zebra stores Orchard note commitment trees as [`Frontier`][1]s while the /// [`z_gettreestate`][2] RPC requires [`CommitmentTree`][3]s. Implementing -/// [`merkle_tree::Hashable`] for [`Node`]s allows the conversion. +/// [`HashSer`] for [`Node`]s allows the conversion. 
/// /// [1]: bridgetree::Frontier /// [2]: https://zcash.github.io/rpc/z_gettreestate.html -/// [3]: merkle_tree::CommitmentTree -impl merkle_tree::Hashable for Node { +/// [3]: incrementalmerkletree::frontier::CommitmentTree +impl HashSer for Node { fn read(mut reader: R) -> io::Result { let mut repr = [0u8; 32]; reader.read_exact(&mut repr)?; @@ -193,24 +196,9 @@ impl merkle_tree::Hashable for Node { fn write(&self, mut writer: W) -> io::Result<()> { writer.write_all(&self.0.to_repr()) } - - fn combine(level: usize, a: &Self, b: &Self) -> Self { - let level = u8::try_from(level).expect("level must fit into u8"); - let layer = MERKLE_DEPTH - 1 - level; - Self(merkle_crh_orchard(layer, a.0, b.0)) - } - - fn blank() -> Self { - Self(NoteCommitmentTree::uncommitted()) - } - - fn empty_root(level: usize) -> Self { - let layer_below = usize::from(MERKLE_DEPTH) - level; - Self(EMPTY_ROOTS[layer_below]) - } } -impl incrementalmerkletree::Hashable for Node { +impl Hashable for Node { fn empty_leaf() -> Self { Self(NoteCommitmentTree::uncommitted()) } @@ -218,13 +206,13 @@ impl incrementalmerkletree::Hashable for Node { /// Combine two nodes to generate a new node in the given level. /// Level 0 is the layer above the leaves (layer 31). /// Level 31 is the root (layer 0). - fn combine(level: incrementalmerkletree::Altitude, a: &Self, b: &Self) -> Self { + fn combine(level: incrementalmerkletree::Level, a: &Self, b: &Self) -> Self { let layer = MERKLE_DEPTH - 1 - u8::from(level); Self(merkle_crh_orchard(layer, a.0, b.0)) } /// Return the node for the level below the given level. (A quirk of the API) - fn empty_root(level: incrementalmerkletree::Altitude) -> Self { + fn empty_root(level: incrementalmerkletree::Level) -> Self { let layer_below = usize::from(MERKLE_DEPTH) - usize::from(level); Self(EMPTY_ROOTS[layer_below]) } @@ -266,6 +254,8 @@ pub enum NoteCommitmentTreeError { /// Orchard Incremental Note Commitment Tree #[derive(Debug, Serialize, Deserialize)] +#[serde(into = "LegacyNoteCommitmentTree")] +#[serde(from = "LegacyNoteCommitmentTree")] pub struct NoteCommitmentTree { /// The tree represented as a Frontier. /// @@ -312,7 +302,7 @@ impl NoteCommitmentTree { /// Returns an error if the tree is full. #[allow(clippy::unwrap_in_result)] pub fn append(&mut self, cm_x: NoteCommitmentUpdate) -> Result<(), NoteCommitmentTreeError> { - if self.inner.append(&cm_x.into()) { + if self.inner.append(cm_x.into()) { // Invalidate cached root let cached_root = self .cached_root @@ -330,14 +320,9 @@ impl NoteCommitmentTree { /// Returns the current root of the tree, used as an anchor in Orchard /// shielded transactions. pub fn root(&self) -> Root { - if let Some(root) = self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked") - .deref() - { + if let Some(root) = self.cached_root() { // Return cached root. - return *root; + return root; } // Get exclusive access, compute the root, and cache it. @@ -351,13 +336,27 @@ impl NoteCommitmentTree { Some(root) => root, None => { // Compute root and cache it. - let root = Root(self.inner.root().0); + let root = self.recalculate_root(); *write_root = Some(root); root } } } + /// Returns the current root of the tree, if it has already been cached. + #[allow(clippy::unwrap_in_result)] + pub fn cached_root(&self) -> Option { + *self + .cached_root + .read() + .expect("a thread that previously held exclusive lock access panicked") + } + + /// Calculates and returns the current root of the tree, ignoring any caching. 
+ pub fn recalculate_root(&self) -> Root { + Root(self.inner.root().0) + } + /// Get the Pallas-based Sinsemilla hash / root node of this merkle tree of /// note commitments. pub fn hash(&self) -> [u8; 32] { @@ -377,17 +376,37 @@ impl NoteCommitmentTree { /// /// For Orchard, the tree is capped at 2^32. pub fn count(&self) -> u64 { - self.inner.position().map_or(0, |pos| u64::from(pos) + 1) + self.inner + .value() + .map_or(0, |x| u64::from(x.position()) + 1) + } + + /// Checks if the tree roots and inner data structures of `self` and `other` are equal. + /// + /// # Panics + /// + /// If they aren't equal, with a message explaining the differences. + /// + /// Only for use in tests. + #[cfg(any(test, feature = "proptest-impl"))] + pub fn assert_frontier_eq(&self, other: &Self) { + // It's technically ok for the cached root not to be preserved, + // but it can result in expensive cryptographic operations, + // so we fail the tests if it happens. + assert_eq!(self.cached_root(), other.cached_root()); + + // Check the data in the internal data structure + assert_eq!(self.inner, other.inner); + + // Check the RPC serialization format (not the same as the Zebra database format) + assert_eq!(SerializedTree::from(self), SerializedTree::from(other)); } } impl Clone for NoteCommitmentTree { /// Clones the inner tree, and creates a new `RwLock` with the cloned root data. fn clone(&self) -> Self { - let cached_root = *self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked"); + let cached_root = self.cached_root(); Self { inner: self.inner.clone(), @@ -433,7 +452,7 @@ impl From> for NoteCommitmentTree { /// A serialized Orchard note commitment tree. /// /// The format of the serialized data is compatible with -/// [`CommitmentTree`](merkle_tree::CommitmentTree) from `librustzcash` and not +/// [`CommitmentTree`](incrementalmerkletree::frontier::CommitmentTree) from `librustzcash` and not /// with [`Frontier`](bridgetree::Frontier) from the crate /// [`incrementalmerkletree`]. Zebra follows the former format in order to stay /// consistent with `zcashd` in RPCs. Note that [`NoteCommitmentTree`] itself is @@ -442,7 +461,7 @@ impl From> for NoteCommitmentTree { /// The formats are semantically equivalent. The primary difference between them /// is that in [`Frontier`](bridgetree::Frontier), the vector of parents is /// dense (we know where the gaps are from the position of the leaf in the -/// overall tree); whereas in [`CommitmentTree`](merkle_tree::CommitmentTree), +/// overall tree); whereas in [`CommitmentTree`](incrementalmerkletree::frontier::CommitmentTree), /// the vector of parent hashes is sparse with [`None`] values in the gaps. /// /// The sparse format, used in this implementation, allows representing invalid @@ -472,8 +491,9 @@ impl From<&NoteCommitmentTree> for SerializedTree { // Convert the note commitment tree from // [`Frontier`](bridgetree::Frontier) to // [`CommitmentTree`](merkle_tree::CommitmentTree). - let tree = CommitmentTree::from_frontier(&tree.inner); - tree.write(&mut serialized_tree) + let tree = incrementalmerkletree::frontier::CommitmentTree::from_frontier(&tree.inner); + + write_commitment_tree(&tree, &mut serialized_tree) .expect("note commitment tree should be serializable"); Self(serialized_tree) } diff --git a/zebra-chain/src/orchard/tree/legacy.rs b/zebra-chain/src/orchard/tree/legacy.rs new file mode 100644 index 00000000000..b4d97cf48d1 --- /dev/null +++ b/zebra-chain/src/orchard/tree/legacy.rs @@ -0,0 +1,122 @@ +//! 
Orchard serialization legacy code. +//! +//! We create a [`LegacyNoteCommitmentTree`] which is a copy of [`NoteCommitmentTree`] but where serialization and +//! deserialization can be derived. +//! To do this we create a [`LegacyFrontier`] which is a legacy `Frontier` structure that can be found in [1], +//! In order to make [`LegacyFrontier`] serializable we also have our own versions of `NonEmptyFrontier` ([`LegacyNonEmptyFrontier`]), +//! `Leaf`([`LegacyLeaf`]) and `Position`([`LegacyPosition`]) that can be found in [1] or [2]. +//! +//! Conversions methods to/from [`LegacyNoteCommitmentTree`] to/from [`NoteCommitmentTree`] are defined also in this file. +//! +//! [1]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/bridgetree.rs +//! [2]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/lib.rs + +use incrementalmerkletree::{frontier::Frontier, Position}; + +use super::{Node, NoteCommitmentTree, Root, MERKLE_DEPTH}; + +/// A legacy version of [`NoteCommitmentTree`]. +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename = "NoteCommitmentTree")] +#[allow(missing_docs)] +pub struct LegacyNoteCommitmentTree { + pub inner: LegacyFrontier, + cached_root: std::sync::RwLock>, +} + +impl From for LegacyNoteCommitmentTree { + fn from(nct: NoteCommitmentTree) -> Self { + LegacyNoteCommitmentTree { + inner: nct.inner.into(), + cached_root: nct.cached_root, + } + } +} + +impl From for NoteCommitmentTree { + fn from(legacy_nct: LegacyNoteCommitmentTree) -> Self { + NoteCommitmentTree { + inner: legacy_nct.inner.into(), + cached_root: legacy_nct.cached_root, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Frontier")] +#[allow(missing_docs)] +pub struct LegacyFrontier { + frontier: Option>, +} + +impl From> for Frontier { + fn from(legacy_frontier: LegacyFrontier) -> Self { + if let Some(legacy_frontier_data) = legacy_frontier.frontier { + let mut ommers = legacy_frontier_data.ommers; + let position = Position::from( + u64::try_from(legacy_frontier_data.position.0) + .expect("old `usize` always fits in `u64`"), + ); + let leaf = match legacy_frontier_data.leaf { + LegacyLeaf::Left(a) => a, + LegacyLeaf::Right(a, b) => { + ommers.insert(0, a); + b + } + }; + Frontier::from_parts( + position, + leaf, + ommers, + ) + .expect("We should be able to construct a frontier from parts given legacy frontier is not empty") + } else { + Frontier::empty() + } + } +} + +impl From> for LegacyFrontier { + fn from(frontier: Frontier) -> Self { + if let Some(frontier_data) = frontier.value() { + let leaf_from_frontier = *frontier_data.leaf(); + let mut leaf = LegacyLeaf::Left(leaf_from_frontier); + let mut ommers = frontier_data.ommers().to_vec(); + let position = usize::try_from(u64::from(frontier_data.position())) + .expect("new position should fit in a `usize`"); + if frontier_data.position().is_odd() { + let left = ommers.remove(0); + leaf = LegacyLeaf::Right(left, leaf_from_frontier); + } + LegacyFrontier { + frontier: Some(LegacyNonEmptyFrontier { + position: LegacyPosition(position), + leaf, + ommers: ommers.to_vec(), + }), + } + } else { + LegacyFrontier { frontier: None } + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "NonEmptyFrontier")] +struct LegacyNonEmptyFrontier { + position: LegacyPosition, + leaf: LegacyLeaf, + ommers: Vec, +} + +/// A set of leaves of a Merkle tree. 
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Leaf")] +enum LegacyLeaf { + Left(A), + Right(A, A), +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[repr(transparent)] +struct LegacyPosition(usize); diff --git a/zebra-chain/src/parameters/network.rs b/zebra-chain/src/parameters/network.rs index 6c6319ce765..05f0a587738 100644 --- a/zebra-chain/src/parameters/network.rs +++ b/zebra-chain/src/parameters/network.rs @@ -59,7 +59,7 @@ pub enum Network { #[default] Mainnet, - /// The testnet. + /// The oldest public test network. Testnet, } @@ -119,6 +119,16 @@ impl Network { Network::Testnet => "test".to_string(), } } + + /// Return the lowercase network name. + pub fn lowercase_name(&self) -> String { + self.to_string().to_ascii_lowercase() + } + + /// Returns `true` if this network is a testing network. + pub fn is_a_test_network(&self) -> bool { + *self != Network::Mainnet + } } impl FromStr for Network { diff --git a/zebra-chain/src/primitives/proofs/bctv14.rs b/zebra-chain/src/primitives/proofs/bctv14.rs index abef385349c..ac1f6d5f0c9 100644 --- a/zebra-chain/src/primitives/proofs/bctv14.rs +++ b/zebra-chain/src/primitives/proofs/bctv14.rs @@ -25,9 +25,7 @@ impl Copy for Bctv14Proof {} impl Clone for Bctv14Proof { fn clone(&self) -> Self { - let mut bytes = [0; 296]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } diff --git a/zebra-chain/src/primitives/proofs/groth16.rs b/zebra-chain/src/primitives/proofs/groth16.rs index 8153b2fb3a8..43f661a38fe 100644 --- a/zebra-chain/src/primitives/proofs/groth16.rs +++ b/zebra-chain/src/primitives/proofs/groth16.rs @@ -25,9 +25,7 @@ impl Copy for Groth16Proof {} impl Clone for Groth16Proof { fn clone(&self) -> Self { - let mut bytes = [0; 192]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } diff --git a/zebra-chain/src/sapling/note/ciphertexts.rs b/zebra-chain/src/sapling/note/ciphertexts.rs index 47fe5606861..472dbfb0a44 100644 --- a/zebra-chain/src/sapling/note/ciphertexts.rs +++ b/zebra-chain/src/sapling/note/ciphertexts.rs @@ -1,3 +1,5 @@ +//! Encrypted parts of Sapling notes. 
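
// Several Clone impls in this patch (in the note ciphertext and proof types)
// collapse to `*self`: once a type is Copy, cloning is just a bitwise copy.
// A minimal illustration with a made-up type:
struct FixedBytes([u8; 80]);

impl Copy for FixedBytes {}

impl Clone for FixedBytes {
    fn clone(&self) -> Self {
        // No per-byte copying needed; Copy lets us dereference.
        *self
    }
}
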
+ use std::{fmt, io}; use serde_big_array::BigArray; @@ -24,9 +26,7 @@ impl Copy for EncryptedNote {} impl Clone for EncryptedNote { fn clone(&self) -> Self { - let mut bytes = [0; 580]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } @@ -73,9 +73,7 @@ impl Copy for WrappedNoteKey {} impl Clone for WrappedNoteKey { fn clone(&self) -> Self { - let mut bytes = [0; 80]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } diff --git a/zebra-chain/src/sapling/tree.rs b/zebra-chain/src/sapling/tree.rs index f5307a8e0a1..ea045e325ab 100644 --- a/zebra-chain/src/sapling/tree.rs +++ b/zebra-chain/src/sapling/tree.rs @@ -14,19 +14,17 @@ use std::{ fmt, hash::{Hash, Hasher}, io, - ops::Deref, sync::Arc, }; use bitvec::prelude::*; -use incrementalmerkletree::{ - bridgetree::{self, Leaf}, - Frontier, -}; +use bridgetree::{self}; +use incrementalmerkletree::{frontier::Frontier, Hashable}; + use lazy_static::lazy_static; use thiserror::Error; use zcash_encoding::{Optional, Vector}; -use zcash_primitives::merkle_tree::{self, Hashable}; +use zcash_primitives::merkle_tree::HashSer; use super::commitment::pedersen_hashes::pedersen_hash; @@ -34,6 +32,9 @@ use crate::serialization::{ serde_helpers, ReadZcashExt, SerializationError, ZcashDeserialize, ZcashSerialize, }; +pub mod legacy; +use legacy::{LegacyLeaf, LegacyNoteCommitmentTree}; + /// The type that is used to update the note commitment tree. /// /// Unfortunately, this is not the same as `sapling::NoteCommitment`. @@ -86,12 +87,6 @@ lazy_static! { }; } -/// The index of a note's commitment at the leafmost layer of its Note -/// Commitment Tree. -/// -/// -pub struct Position(pub(crate) u64); - /// Sapling note commitment tree root node hash. /// /// The root hash in LEBS2OSP256(rt) encoding of the Sapling note @@ -168,7 +163,7 @@ impl ZcashDeserialize for Root { /// Note that it's handled as a byte buffer and not a point coordinate (jubjub::Fq) /// because that's how the spec handles the MerkleCRH^Sapling function inputs and outputs. #[derive(Copy, Clone, Eq, PartialEq)] -struct Node([u8; 32]); +pub struct Node([u8; 32]); impl fmt::Debug for Node { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -180,12 +175,12 @@ impl fmt::Debug for Node { /// /// Zebra stores Sapling note commitment trees as [`Frontier`][1]s while the /// [`z_gettreestate`][2] RPC requires [`CommitmentTree`][3]s. Implementing -/// [`merkle_tree::Hashable`] for [`Node`]s allows the conversion. +/// [`incrementalmerkletree::Hashable`] for [`Node`]s allows the conversion. 
/// /// [1]: bridgetree::Frontier /// [2]: https://zcash.github.io/rpc/z_gettreestate.html -/// [3]: merkle_tree::CommitmentTree -impl merkle_tree::Hashable for Node { +/// [3]: incrementalmerkletree::frontier::CommitmentTree +impl HashSer for Node { fn read<R: io::Read>(mut reader: R) -> io::Result<Self> { let mut node = [0u8; 32]; reader.read_exact(&mut node)?; @@ -195,24 +190,9 @@ fn write<W: io::Write>(&self, mut writer: W) -> io::Result<()> { writer.write_all(self.0.as_ref()) } - - fn combine(level: usize, a: &Self, b: &Self) -> Self { - let level = u8::try_from(level).expect("level must fit into u8"); - let layer = MERKLE_DEPTH - 1 - level; - Self(merkle_crh_sapling(layer, a.0, b.0)) - } - - fn blank() -> Self { - Self(NoteCommitmentTree::uncommitted()) - } - - fn empty_root(level: usize) -> Self { - let layer_below = usize::from(MERKLE_DEPTH) - level; - Self(EMPTY_ROOTS[layer_below]) - } } -impl incrementalmerkletree::Hashable for Node { +impl Hashable for Node { fn empty_leaf() -> Self { Self(NoteCommitmentTree::uncommitted()) } @@ -220,13 +200,13 @@ /// Combine two nodes to generate a new node in the given level. /// Level 0 is the layer above the leaves (layer 31). /// Level 31 is the root (layer 0). - fn combine(level: incrementalmerkletree::Altitude, a: &Self, b: &Self) -> Self { + fn combine(level: incrementalmerkletree::Level, a: &Self, b: &Self) -> Self { let layer = MERKLE_DEPTH - 1 - u8::from(level); Self(merkle_crh_sapling(layer, a.0, b.0)) } /// Return the node for the level below the given level. (A quirk of the API) - fn empty_root(level: incrementalmerkletree::Altitude) -> Self { + fn empty_root(level: incrementalmerkletree::Level) -> Self { let layer_below = usize::from(MERKLE_DEPTH) - usize::from(level); Self(EMPTY_ROOTS[layer_below]) } @@ -268,6 +248,8 @@ pub enum NoteCommitmentTreeError { /// Sapling Incremental Note Commitment Tree. #[derive(Debug, Serialize, Deserialize)] +#[serde(into = "LegacyNoteCommitmentTree")] +#[serde(from = "LegacyNoteCommitmentTree")] pub struct NoteCommitmentTree { /// The tree represented as a [`Frontier`](bridgetree::Frontier). /// @@ -285,7 +267,7 @@ pub struct NoteCommitmentTree { /// /// /// Note: MerkleDepth^Sapling = MERKLE_DEPTH = 32. - inner: bridgetree::Frontier<Node, MERKLE_DEPTH>, + inner: Frontier<Node, MERKLE_DEPTH>, /// A cached root of the tree. /// @@ -315,7 +297,7 @@ impl NoteCommitmentTree { /// Returns an error if the tree is full. #[allow(clippy::unwrap_in_result)] pub fn append(&mut self, cm_u: NoteCommitmentUpdate) -> Result<(), NoteCommitmentTreeError> { - if self.inner.append(&cm_u.into()) { + if self.inner.append(cm_u.into()) { // Invalidate cached root let cached_root = self .cached_root @@ -333,14 +315,9 @@ /// Returns the current root of the tree, used as an anchor in Sapling /// shielded transactions. pub fn root(&self) -> Root { - if let Some(root) = self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked") - .deref() - { + if let Some(root) = self.cached_root() { // Return cached root. - return *root; + return root; } // Get exclusive access, compute the root, and cache it. @@ -354,13 +331,27 @@ Some(root) => root, None => { // Compute root and cache it. - let root = Root::try_from(self.inner.root().0).unwrap(); + let root = self.recalculate_root(); *write_root = Some(root); root } } } + /// Returns the current root of the tree, if it has already been cached.
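The `combine` and `empty_root` implementations above keep converting between two numbering schemes: `incrementalmerkletree` counts levels up from the leaves, while the Zcash spec counts layers down from the root. A minimal sketch of that mapping, assuming Sapling's `MERKLE_DEPTH` of 32:

```rust
const MERKLE_DEPTH: u8 = 32;

/// Convert a crate-style level (0 just above the leaves) into a
/// spec-style layer (0 at the root).
fn layer_for_level(level: u8) -> u8 {
    MERKLE_DEPTH - 1 - level
}

fn main() {
    assert_eq!(layer_for_level(0), 31); // just above the leaves
    assert_eq!(layer_for_level(31), 0); // the root
}
```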
+ #[allow(clippy::unwrap_in_result)] + pub fn cached_root(&self) -> Option<Root> { + *self + .cached_root + .read() + .expect("a thread that previously held exclusive lock access panicked") + } + + /// Calculates and returns the current root of the tree, ignoring any caching. + pub fn recalculate_root(&self) -> Root { + Root::try_from(self.inner.root().0).unwrap() + } + /// Gets the Jubjub-based Pedersen hash of root node of this merkle tree of /// note commitments. pub fn hash(&self) -> [u8; 32] { @@ -380,7 +371,30 @@ /// /// For Sapling, the tree is capped at 2^32. pub fn count(&self) -> u64 { - self.inner.position().map_or(0, |pos| u64::from(pos) + 1) + self.inner + .value() + .map_or(0, |x| u64::from(x.position()) + 1) + } + + /// Checks if the tree roots and inner data structures of `self` and `other` are equal. + /// + /// # Panics + /// + /// If they aren't equal, with a message explaining the differences. + /// + /// Only for use in tests. + #[cfg(any(test, feature = "proptest-impl"))] + pub fn assert_frontier_eq(&self, other: &Self) { + // It's technically ok for the cached root not to be preserved, + // but it can result in expensive cryptographic operations, + // so we fail the tests if it happens. + assert_eq!(self.cached_root(), other.cached_root()); + + // Check the data in the internal data structure + assert_eq!(self.inner, other.inner); + + // Check the RPC serialization format (not the same as the Zebra database format) + assert_eq!(SerializedTree::from(self), SerializedTree::from(other)); + } } @@ -388,10 +402,7 @@ impl Clone for NoteCommitmentTree { /// Clones the inner tree, and creates a new [`RwLock`](std::sync::RwLock) /// with the cloned root data. fn clone(&self) -> Self { - let cached_root = *self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked"); + let cached_root = self.cached_root(); Self { inner: self.inner.clone(), @@ -437,7 +448,7 @@ impl From<Vec<jubjub::Fq>> for NoteCommitmentTree { /// A serialized Sapling note commitment tree. /// /// The format of the serialized data is compatible with -/// [`CommitmentTree`](merkle_tree::CommitmentTree) from `librustzcash` and not +/// [`CommitmentTree`](incrementalmerkletree::frontier::CommitmentTree) from `librustzcash` and not /// with [`Frontier`](bridgetree::Frontier) from the crate /// [`incrementalmerkletree`]. Zebra follows the former format in order to stay /// consistent with `zcashd` in RPCs. Note that [`NoteCommitmentTree`] itself is @@ -446,7 +457,7 @@ impl From<Vec<jubjub::Fq>> for NoteCommitmentTree { /// The formats are semantically equivalent. The primary difference between them /// is that in [`Frontier`](bridgetree::Frontier), the vector of parents is /// dense (we know where the gaps are from the position of the leaf in the -/// overall tree); whereas in [`CommitmentTree`](merkle_tree::CommitmentTree), +/// overall tree); whereas in [`CommitmentTree`](incrementalmerkletree::frontier::CommitmentTree), /// the vector of parent hashes is sparse with [`None`] values in the gaps. /// /// The sparse format, used in this implementation, allows representing invalid @@ -463,6 +474,9 @@ impl From<&NoteCommitmentTree> for SerializedTree { fn from(tree: &NoteCommitmentTree) -> Self { let mut serialized_tree = vec![]; + // + let legacy_tree = LegacyNoteCommitmentTree::from(tree.clone()); + // Convert the note commitment tree represented as a frontier into the // format compatible with `zcashd`.
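The conversion that follows walks the binary representation of the frontier position: a set bit means the sparse `zcashd` format needs a parent hash from the dense ommer list, and a clear bit becomes a gap. A simplified, illustrative sketch of that walk (hashes reduced to plain byte arrays; not Zebra's code):

```rust
/// Lay out the sparse parent vector, lowest tree level first.
/// `ommers` is the dense list carried by the frontier.
fn sparse_parents(mut position: u64, ommers: &[[u8; 32]]) -> Vec<Option<[u8; 32]>> {
    let mut ommers_iter = ommers.iter();

    // The lowest bit describes the leaf pair itself, not an ommer,
    // so shift it away before walking the levels.
    position >>= 1;

    let mut parents = Vec::new();
    while position > 0 {
        if position & 1 == 1 {
            // Set bit: an ommer exists at this level; take the next dense entry.
            parents.push(ommers_iter.next().copied());
        } else {
            // Clear bit: the sparse format records a gap at this level.
            parents.push(None);
        }
        position >>= 1;
    }
    parents
}
```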
// @@ -476,20 +490,22 @@ impl From<&NoteCommitmentTree> for SerializedTree { // sparse formats for Sapling. // // [1]: - if let Some(frontier) = tree.inner.value() { - let (left_leaf, right_leaf) = match frontier.leaf() { - Leaf::Left(left_value) => (Some(left_value), None), - Leaf::Right(left_value, right_value) => (Some(left_value), Some(right_value)), + if let Some(frontier) = legacy_tree.inner.frontier { + let (left_leaf, right_leaf) = match frontier.leaf { + LegacyLeaf::Left(left_value) => (Some(left_value), None), + LegacyLeaf::Right(left_value, right_value) => (Some(left_value), Some(right_value)), }; // Ommers are siblings of parent nodes along the branch from the // most recent leaf to the root of the tree. - let mut ommers_iter = frontier.ommers().iter(); + let mut ommers_iter = frontier.ommers.iter(); // Set bits in the binary representation of the position indicate // the presence of ommers along the branch from the most recent leaf // node to the root of the tree, except for the lowest bit. - let mut position: usize = frontier.position().into(); + let mut position: u64 = (frontier.position.0) + .try_into() + .expect("old usize position always fits in u64"); // The lowest bit does not indicate the presence of any ommers. We // clear it so that we can test if there are no set bits left in @@ -526,7 +542,6 @@ impl From<&NoteCommitmentTree> for SerializedTree { } // Serialize the converted note commitment tree. - Optional::write(&mut serialized_tree, left_leaf, |tree, leaf| { leaf.write(tree) }) diff --git a/zebra-chain/src/sapling/tree/legacy.rs b/zebra-chain/src/sapling/tree/legacy.rs new file mode 100644 index 00000000000..0e66e8aedea --- /dev/null +++ b/zebra-chain/src/sapling/tree/legacy.rs @@ -0,0 +1,125 @@ +//! Sapling serialization legacy code. +//! +//! We create a [`LegacyNoteCommitmentTree`], which is a copy of [`NoteCommitmentTree`] where serialization and +//! deserialization can be derived. +//! To do this, we create a [`LegacyFrontier`], our own copy of the legacy `Frontier` structure that can be found in [1]. +//! To make [`LegacyFrontier`] serializable, we also have our own versions of `NonEmptyFrontier` ([`LegacyNonEmptyFrontier`]), +//! `Leaf` ([`LegacyLeaf`]), and `Position` ([`LegacyPosition`]), which can be found in [1] or [2]. +//! +//! Conversion methods between [`LegacyNoteCommitmentTree`] and [`NoteCommitmentTree`] are also defined in this file. +//! +//! [1]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/bridgetree.rs +//! [2]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/lib.rs + +use incrementalmerkletree::{frontier::Frontier, Position}; + +use super::{Node, NoteCommitmentTree, Root, MERKLE_DEPTH}; + +/// A legacy version of [`NoteCommitmentTree`].
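The `#[serde(into = ...)]`/`#[serde(from = ...)]` attributes on the note commitment trees pair with the `#[serde(rename = ...)]` on the legacy copies defined below: serde round-trips through the legacy struct, and the rename keeps the serialized type name identical to the original. A toy sketch of the pattern with hypothetical `Tree`/`LegacyTree` types:

```rust
use serde::{Deserialize, Serialize};

// The real type delegates its serde representation to the legacy copy.
// `into`/`from` require `Clone` and the two `From` conversions below.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(into = "LegacyTree", from = "LegacyTree")]
struct Tree {
    leaves: Vec<u8>,
}

// The legacy copy is renamed, so serialized data still says "Tree".
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename = "Tree")]
struct LegacyTree {
    leaves: Vec<u8>,
}

impl From<Tree> for LegacyTree {
    fn from(t: Tree) -> Self {
        LegacyTree { leaves: t.leaves }
    }
}

impl From<LegacyTree> for Tree {
    fn from(l: LegacyTree) -> Self {
        Tree { leaves: l.leaves }
    }
}
```

With this shape, changing the internal representation later only means updating the two `From` impls; the serialized bytes stay stable.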
+#[derive(Debug, Serialize, Deserialize)] +#[serde(rename = "NoteCommitmentTree")] +#[allow(missing_docs)] +pub struct LegacyNoteCommitmentTree { + pub inner: LegacyFrontier<Node, MERKLE_DEPTH>, + cached_root: std::sync::RwLock<Option<Root>>, +} + +impl From<NoteCommitmentTree> for LegacyNoteCommitmentTree { + fn from(nct: NoteCommitmentTree) -> Self { + LegacyNoteCommitmentTree { + inner: nct.inner.into(), + cached_root: nct.cached_root, + } + } +} + +impl From<LegacyNoteCommitmentTree> for NoteCommitmentTree { + fn from(nct: LegacyNoteCommitmentTree) -> Self { + NoteCommitmentTree { + inner: nct.inner.into(), + cached_root: nct.cached_root, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Frontier")] +#[allow(missing_docs)] +pub struct LegacyFrontier<H, const DEPTH: u8> { + pub frontier: Option<LegacyNonEmptyFrontier<H>>, +} + +impl From<LegacyFrontier<Node, MERKLE_DEPTH>> for Frontier<Node, MERKLE_DEPTH> { + fn from(legacy_frontier: LegacyFrontier<Node, MERKLE_DEPTH>) -> Self { + if let Some(legacy_frontier_data) = legacy_frontier.frontier { + let mut ommers = legacy_frontier_data.ommers; + let position = Position::from( + u64::try_from(legacy_frontier_data.position.0) + .expect("old `usize` always fits in `u64`"), + ); + let leaf = match legacy_frontier_data.leaf { + LegacyLeaf::Left(a) => a, + LegacyLeaf::Right(a, b) => { + ommers.insert(0, a); + b + } + }; + Frontier::from_parts( + position, + leaf, + ommers, + ) + .expect("We should be able to construct a frontier from parts given legacy frontier is not empty") + } else { + Frontier::empty() + } + } +} + +impl From<Frontier<Node, MERKLE_DEPTH>> for LegacyFrontier<Node, MERKLE_DEPTH> { + fn from(frontier: Frontier<Node, MERKLE_DEPTH>) -> Self { + if let Some(frontier_data) = frontier.value() { + let leaf_from_frontier = *frontier_data.leaf(); + let mut leaf = LegacyLeaf::Left(leaf_from_frontier); + let mut ommers = frontier_data.ommers().to_vec(); + let position = usize::try_from(u64::from(frontier_data.position())) + .expect("new position should fit in a `usize`"); + if frontier_data.position().is_odd() { + let left = ommers.remove(0); + leaf = LegacyLeaf::Right(left, leaf_from_frontier); + } + LegacyFrontier { + frontier: Some(LegacyNonEmptyFrontier { + position: LegacyPosition(position), + leaf, + ommers: ommers.to_vec(), + }), + } + } else { + LegacyFrontier { frontier: None } + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "NonEmptyFrontier")] +#[allow(missing_docs)] +pub struct LegacyNonEmptyFrontier<A> { + pub position: LegacyPosition, + pub leaf: LegacyLeaf<A>, + pub ommers: Vec<A>, +} + +/// A set of leaves of a Merkle tree. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Leaf")] +#[allow(missing_docs)] +pub enum LegacyLeaf<A> { + Left(A), + Right(A, A), +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[repr(transparent)] +#[allow(missing_docs)] +pub struct LegacyPosition(pub usize); diff --git a/zebra-chain/src/serialization/sha256d.rs b/zebra-chain/src/serialization/sha256d.rs index 484d921eb91..00eab58314b 100644 --- a/zebra-chain/src/serialization/sha256d.rs +++ b/zebra-chain/src/serialization/sha256d.rs @@ -14,7 +14,7 @@ impl Writer { /// Consume the Writer and produce the hash result.
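For context on the `Sha256::digest(result1)` change just below: `digest` accepts any `impl AsRef<[u8]>`, and the `GenericArray` returned by the first pass already satisfies that bound, so the extra borrow was redundant. A minimal sketch of the double-SHA-256 ("sha256d") construction, assuming the `sha2` crate:

```rust
use sha2::{Digest, Sha256};

/// Hash `data` twice with SHA-256, as Zcash and Bitcoin do for block
/// and transaction hashes.
fn sha256d(data: &[u8]) -> [u8; 32] {
    let first = Sha256::digest(data);
    // No borrow needed: the first digest's output already implements AsRef<[u8]>.
    let second = Sha256::digest(first);
    second.into()
}

fn main() {
    let hash = sha256d(b"zebra");
    assert_eq!(hash.len(), 32);
}
```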
pub fn finish(self) -> [u8; 32] { let result1 = self.hash.finalize(); - let result2 = Sha256::digest(&result1); + let result2 = Sha256::digest(result1); let mut buffer = [0u8; 32]; buffer[0..32].copy_from_slice(&result2[0..32]); buffer @@ -39,7 +39,7 @@ pub struct Checksum(pub [u8; 4]); impl<'a> From<&'a [u8]> for Checksum { fn from(bytes: &'a [u8]) -> Self { let hash1 = Sha256::digest(bytes); - let hash2 = Sha256::digest(&hash1); + let hash2 = Sha256::digest(hash1); let mut checksum = [0u8; 4]; checksum[0..4].copy_from_slice(&hash2[0..4]); Self(checksum) diff --git a/zebra-chain/src/sprout/joinsplit.rs b/zebra-chain/src/sprout/joinsplit.rs index 059ac4be5a0..ca891e5f892 100644 --- a/zebra-chain/src/sprout/joinsplit.rs +++ b/zebra-chain/src/sprout/joinsplit.rs @@ -1,6 +1,6 @@ //! Sprout funds transfers using [`JoinSplit`]s. -use std::io; +use std::{fmt, io}; use serde::{Deserialize, Serialize}; @@ -49,7 +49,7 @@ impl From<&RandomSeed> for [u8; 32] { /// A _JoinSplit Description_, as described in [protocol specification §7.2][ps]. /// /// [ps]: https://zips.z.cash/protocol/protocol.pdf#joinsplitencoding -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct JoinSplit<P: ZkSnarkProof> { /// A value that the JoinSplit transfer removes from the transparent value /// pool. @@ -81,6 +81,23 @@ pub struct JoinSplit<P: ZkSnarkProof> { pub enc_ciphertexts: [note::EncryptedNote; 2], } +impl<P: ZkSnarkProof> fmt::Debug for JoinSplit<P> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("JoinSplit") + .field("vpub_old", &self.vpub_old) + .field("vpub_new", &self.vpub_new) + .field("anchor", &self.anchor) + .field("nullifiers", &self.nullifiers) + .field("commitments", &self.commitments) + .field("ephemeral_key", &HexDebug(self.ephemeral_key.as_bytes())) + .field("random_seed", &self.random_seed) + .field("vmacs", &self.vmacs) + .field("zkproof", &self.zkproof) + .field("enc_ciphertexts", &self.enc_ciphertexts) + .finish() + } +} + impl<P: ZkSnarkProof> ZcashSerialize for JoinSplit<P>
{ fn zcash_serialize(&self, mut writer: W) -> Result<(), io::Error> { self.vpub_old.zcash_serialize(&mut writer)?; diff --git a/zebra-chain/src/sprout/note/ciphertexts.rs b/zebra-chain/src/sprout/note/ciphertexts.rs index 37628c2c965..7fd3bb42b72 100644 --- a/zebra-chain/src/sprout/note/ciphertexts.rs +++ b/zebra-chain/src/sprout/note/ciphertexts.rs @@ -1,3 +1,5 @@ +//! Encrypted parts of Sprout notes. + use std::{fmt, io}; use serde::{Deserialize, Serialize}; @@ -25,9 +27,7 @@ impl Copy for EncryptedNote {} impl Clone for EncryptedNote { fn clone(&self) -> Self { - let mut bytes = [0; 601]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } diff --git a/zebra-chain/src/sprout/tree.rs b/zebra-chain/src/sprout/tree.rs index ab597fc9869..2b70b0a364d 100644 --- a/zebra-chain/src/sprout/tree.rs +++ b/zebra-chain/src/sprout/tree.rs @@ -10,16 +10,19 @@ //! //! A root of a note commitment tree is associated with each treestate. -use std::{fmt, ops::Deref}; +use std::fmt; use byteorder::{BigEndian, ByteOrder}; -use incrementalmerkletree::{bridgetree, Frontier}; +use incrementalmerkletree::frontier::Frontier; use lazy_static::lazy_static; use sha2::digest::generic_array::GenericArray; use thiserror::Error; use super::commitment::NoteCommitment; +pub mod legacy; +use legacy::LegacyNoteCommitmentTree; + #[cfg(any(test, feature = "proptest-impl"))] use proptest_derive::Arbitrary; @@ -128,7 +131,7 @@ impl From<&Root> for [u8; 32] { /// A node of the Sprout note commitment tree. #[derive(Clone, Copy, Eq, PartialEq)] -struct Node([u8; 32]); +pub struct Node([u8; 32]); impl fmt::Debug for Node { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -147,12 +150,12 @@ impl incrementalmerkletree::Hashable for Node { /// Note that Sprout does not use the `level` argument. /// /// [MerkleCRH^Sprout]: https://zips.z.cash/protocol/protocol.pdf#sproutmerklecrh - fn combine(_level: incrementalmerkletree::Altitude, a: &Self, b: &Self) -> Self { + fn combine(_level: incrementalmerkletree::Level, a: &Self, b: &Self) -> Self { Self(merkle_crh_sprout(a.0, b.0)) } /// Returns the node for the level below the given level. (A quirk of the API) - fn empty_root(level: incrementalmerkletree::Altitude) -> Self { + fn empty_root(level: incrementalmerkletree::Level) -> Self { let layer_below = usize::from(MERKLE_DEPTH) - usize::from(level); Self(EMPTY_ROOTS[layer_below]) } @@ -200,16 +203,18 @@ pub enum NoteCommitmentTreeError { /// job of this tree to protect against double-spending, as it is append-only; double-spending /// is prevented by maintaining the [nullifier set] for each shielded pool. /// -/// Internally this wraps [`incrementalmerkletree::bridgetree::Frontier`], so that we can maintain and increment +/// Internally this wraps [`bridgetree::Frontier`], so that we can maintain and increment /// the full tree with only the minimal amount of non-empty nodes/leaves required. /// /// [Sprout Note Commitment Tree]: https://zips.z.cash/protocol/protocol.pdf#merkletree /// [nullifier set]: https://zips.z.cash/protocol/protocol.pdf#nullifierset #[derive(Debug, Serialize, Deserialize)] +#[serde(into = "LegacyNoteCommitmentTree")] +#[serde(from = "LegacyNoteCommitmentTree")] pub struct NoteCommitmentTree { - /// The tree represented as a [`incrementalmerkletree::bridgetree::Frontier`]. + /// The tree represented as a [`bridgetree::Frontier`]. /// - /// A [`incrementalmerkletree::Frontier`] is a subset of the tree that allows to fully specify it. 
It + /// A [`bridgetree::Frontier`] is a subset of the tree that fully specifies it. It /// consists of nodes along the rightmost (newer) branch of the tree that /// has non-empty nodes. Upper (near root) empty nodes of the branch are not /// stored. @@ -222,7 +227,7 @@ pub struct NoteCommitmentTree { /// /// /// Note: MerkleDepth^Sprout = MERKLE_DEPTH = 29. - inner: bridgetree::Frontier<Node, MERKLE_DEPTH>, + inner: Frontier<Node, MERKLE_DEPTH>, /// A cached root of the tree. /// @@ -248,7 +253,7 @@ impl NoteCommitmentTree { /// Returns an error if the tree is full. #[allow(clippy::unwrap_in_result)] pub fn append(&mut self, cm: NoteCommitment) -> Result<(), NoteCommitmentTreeError> { - if self.inner.append(&cm.into()) { + if self.inner.append(cm.into()) { // Invalidate cached root let cached_root = self .cached_root @@ -266,14 +271,9 @@ impl NoteCommitmentTree { /// Returns the current root of the tree; used as an anchor in Sprout /// shielded transactions. pub fn root(&self) -> Root { - if let Some(root) = self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked") - .deref() - { + if let Some(root) = self.cached_root() { // Return cached root. - return *root; + return root; } // Get exclusive access, compute the root, and cache it. @@ -287,13 +287,27 @@ impl NoteCommitmentTree { Some(root) => root, None => { // Compute root and cache it. - let root = Root(self.inner.root().0); + let root = self.recalculate_root(); *write_root = Some(root); root } } } + /// Returns the current root of the tree, if it has already been cached. + #[allow(clippy::unwrap_in_result)] + pub fn cached_root(&self) -> Option<Root> { + *self + .cached_root + .read() + .expect("a thread that previously held exclusive lock access panicked") + } + + /// Calculates and returns the current root of the tree, ignoring any caching. + pub fn recalculate_root(&self) -> Root { + Root(self.inner.root().0) + } + /// Returns a hash of the Sprout note commitment tree root. pub fn hash(&self) -> [u8; 32] { self.root().into() } @@ -314,17 +328,34 @@ impl NoteCommitmentTree { /// /// [spec]: https://zips.z.cash/protocol/protocol.pdf#merkletree pub fn count(&self) -> u64 { - self.inner.position().map_or(0, |pos| u64::from(pos) + 1) + self.inner + .value() + .map_or(0, |x| u64::from(x.position()) + 1) + } + + /// Checks if the tree roots and inner data structures of `self` and `other` are equal. + /// + /// # Panics + /// + /// If they aren't equal, with a message explaining the differences. + /// + /// Only for use in tests. + #[cfg(any(test, feature = "proptest-impl"))] + pub fn assert_frontier_eq(&self, other: &Self) { + // It's technically ok for the cached root not to be preserved, + // but it can result in expensive cryptographic operations, + // so we fail the tests if it happens. + assert_eq!(self.cached_root(), other.cached_root()); + + // Check the data in the internal data structure + assert_eq!(self.inner, other.inner); } } impl Clone for NoteCommitmentTree { /// Clones the inner tree, and creates a new `RwLock` with the cloned root data.
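The refactored `root()` methods (for both Sprout and Sapling) follow a double-checked caching pattern: take the cheap read lock first, and only if the cache is empty take the write lock and re-check before recomputing. A standalone sketch of the same flow, with `u64` standing in for the real `Root` type:

```rust
use std::sync::RwLock;

struct CachedRoot {
    cached: RwLock<Option<u64>>,
}

impl CachedRoot {
    fn root(&self) -> u64 {
        // Fast path: a shared read lock is enough when the root is cached.
        if let Some(root) = *self.cached.read().expect("lock poisoned") {
            return root;
        }

        // Slow path: take the write lock, then re-check, because another
        // thread may have filled the cache between the two lock acquisitions.
        let mut write = self.cached.write().expect("lock poisoned");
        match *write {
            Some(root) => root,
            None => {
                let root = expensive_recalculation();
                *write = Some(root);
                root
            }
        }
    }
}

fn expensive_recalculation() -> u64 {
    42 // stand-in for the real Merkle root computation
}

fn main() {
    let tree = CachedRoot { cached: RwLock::new(None) };
    assert_eq!(tree.root(), 42); // computed and cached
    assert_eq!(tree.root(), 42); // served from the cache
}
```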
fn clone(&self) -> Self { - let cached_root = *self - .cached_root - .read() - .expect("a thread that previously held exclusive lock access panicked") + let cached_root = self.cached_root(); Self { inner: self.inner.clone(), @@ -336,7 +367,7 @@ impl Default for NoteCommitmentTree { fn default() -> Self { Self { - inner: bridgetree::Frontier::empty(), + inner: Frontier::empty(), cached_root: Default::default(), } } diff --git a/zebra-chain/src/sprout/tree/legacy.rs b/zebra-chain/src/sprout/tree/legacy.rs new file mode 100644 index 00000000000..b11e674bafa --- /dev/null +++ b/zebra-chain/src/sprout/tree/legacy.rs @@ -0,0 +1,121 @@ +//! Sprout serialization legacy code. +//! +//! We create a [`LegacyNoteCommitmentTree`], which is a copy of [`NoteCommitmentTree`] where serialization and +//! deserialization can be derived. +//! To do this, we create a [`LegacyFrontier`], our own copy of the legacy `Frontier` structure that can be found in [1]. +//! To make [`LegacyFrontier`] serializable, we also have our own versions of `NonEmptyFrontier` ([`LegacyNonEmptyFrontier`]), +//! `Leaf` ([`LegacyLeaf`]), and `Position` ([`LegacyPosition`]), which can be found in [1] or [2]. +//! +//! Conversion methods between [`LegacyNoteCommitmentTree`] and [`NoteCommitmentTree`] are also defined in this file. +//! +//! [1]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/bridgetree.rs +//! [2]: https://github.com/zcash/incrementalmerkletree/blob/incrementalmerkletree-v0.3.1/src/lib.rs + +use incrementalmerkletree::{frontier::Frontier, Position}; + +use super::{Node, NoteCommitmentTree, Root, MERKLE_DEPTH}; + +/// A legacy version of [`NoteCommitmentTree`]. +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename = "NoteCommitmentTree")] +pub struct LegacyNoteCommitmentTree { + inner: LegacyFrontier<Node, MERKLE_DEPTH>, + cached_root: std::sync::RwLock<Option<Root>>, +} + +impl From<NoteCommitmentTree> for LegacyNoteCommitmentTree { + fn from(nct: NoteCommitmentTree) -> Self { + LegacyNoteCommitmentTree { + inner: nct.inner.into(), + cached_root: nct.cached_root, + } + } +} + +impl From<LegacyNoteCommitmentTree> for NoteCommitmentTree { + fn from(nct: LegacyNoteCommitmentTree) -> Self { + NoteCommitmentTree { + inner: nct.inner.into(), + cached_root: nct.cached_root, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Frontier")] +#[allow(missing_docs)] +pub struct LegacyFrontier<H, const DEPTH: u8> { + frontier: Option<LegacyNonEmptyFrontier<H>>, +} + +impl From<LegacyFrontier<Node, MERKLE_DEPTH>> for Frontier<Node, MERKLE_DEPTH> { + fn from(legacy_frontier: LegacyFrontier<Node, MERKLE_DEPTH>) -> Self { + if let Some(legacy_frontier_data) = legacy_frontier.frontier { + let mut ommers = legacy_frontier_data.ommers; + let position = Position::from( + u64::try_from(legacy_frontier_data.position.0) + .expect("old `usize` always fits in `u64`"), + ); + let leaf = match legacy_frontier_data.leaf { + LegacyLeaf::Left(a) => a, + LegacyLeaf::Right(a, b) => { + ommers.insert(0, a); + b + } + }; + Frontier::from_parts( + position, + leaf, + ommers, + ) + .expect("We should be able to construct a frontier from parts given legacy frontier is not empty") + } else { + Frontier::empty() + } + } +} + +impl From<Frontier<Node, MERKLE_DEPTH>> for LegacyFrontier<Node, MERKLE_DEPTH> { + fn from(frontier: Frontier<Node, MERKLE_DEPTH>) -> Self { + if let Some(frontier_data) = frontier.value() { + let leaf_from_frontier = *frontier_data.leaf(); + let mut leaf = LegacyLeaf::Left(leaf_from_frontier); + let mut ommers = frontier_data.ommers().to_vec(); + let position = usize::try_from(u64::from(frontier_data.position())) + .expect("new position should fit in a `usize`"); + if
frontier_data.position().is_odd() { + let left = ommers.remove(0); + leaf = LegacyLeaf::Right(left, leaf_from_frontier); + } + LegacyFrontier { + frontier: Some(LegacyNonEmptyFrontier { + position: LegacyPosition(position), + leaf, + ommers: ommers.to_vec(), + }), + } + } else { + LegacyFrontier { frontier: None } + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "NonEmptyFrontier")] +struct LegacyNonEmptyFrontier { + position: LegacyPosition, + leaf: LegacyLeaf, + ommers: Vec, +} + +/// A set of leaves of a Merkle tree. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename = "Leaf")] +enum LegacyLeaf { + Left(A), + Right(A, A), +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[repr(transparent)] +struct LegacyPosition(usize); diff --git a/zebra-chain/src/transaction.rs b/zebra-chain/src/transaction.rs index 0b2c25583d2..583ca9681e8 100644 --- a/zebra-chain/src/transaction.rs +++ b/zebra-chain/src/transaction.rs @@ -63,7 +63,10 @@ use crate::{ /// internally by different enum variants. Because we checkpoint on Canopy /// activation, we do not validate any pre-Sapling transaction types. #[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))] +#[cfg_attr( + any(test, feature = "proptest-impl", feature = "elasticsearch"), + derive(Serialize) +)] pub enum Transaction { /// A fully transparent transaction (`version = 1`). V1 { diff --git a/zebra-chain/src/transaction/arbitrary.rs b/zebra-chain/src/transaction/arbitrary.rs index c218ccb6238..704a0d23cd1 100644 --- a/zebra-chain/src/transaction/arbitrary.rs +++ b/zebra-chain/src/transaction/arbitrary.rs @@ -30,7 +30,9 @@ use crate::{ use itertools::Itertools; -use super::{FieldNotPresent, JoinSplitData, LockTime, Memo, Transaction, UnminedTx}; +use super::{ + FieldNotPresent, JoinSplitData, LockTime, Memo, Transaction, UnminedTx, VerifiedUnminedTx, +}; /// The maximum number of arbitrary transactions, inputs, or outputs. /// @@ -107,20 +109,32 @@ impl Transaction { option::of(any::>()), ) .prop_map( - |( - inputs, - outputs, - lock_time, - expiry_height, - joinsplit_data, - sapling_shielded_data, - )| Transaction::V4 { + move |( inputs, outputs, lock_time, expiry_height, joinsplit_data, sapling_shielded_data, + )| { + Transaction::V4 { + inputs, + outputs, + lock_time, + expiry_height, + joinsplit_data: if ledger_state.height.is_min() { + // The genesis block should not contain any joinsplits. + None + } else { + joinsplit_data + }, + sapling_shielded_data: if ledger_state.height.is_min() { + // The genesis block should not contain any shielded data. + None + } else { + sapling_shielded_data + }, + } }, ) .boxed() @@ -157,8 +171,18 @@ impl Transaction { expiry_height, inputs, outputs, - sapling_shielded_data, - orchard_shielded_data, + sapling_shielded_data: if ledger_state.height.is_min() { + // The genesis block should not contain any shielded data. + None + } else { + sapling_shielded_data + }, + orchard_shielded_data: if ledger_state.height.is_min() { + // The genesis block should not contain any shielded data. 
+ None + } else { + orchard_shielded_data + }, } }, ) @@ -783,6 +807,52 @@ impl Arbitrary for UnminedTx { type Strategy = BoxedStrategy; } +impl Arbitrary for VerifiedUnminedTx { + type Parameters = (); + + fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { + ( + any::(), + any::>(), + any::(), + any::<(u16, u16)>().prop_map(|(unpaid_actions, conventional_actions)| { + ( + unpaid_actions % conventional_actions.saturating_add(1), + conventional_actions, + ) + }), + any::(), + ) + .prop_map( + |( + transaction, + miner_fee, + legacy_sigop_count, + (conventional_actions, mut unpaid_actions), + fee_weight_ratio, + )| { + if unpaid_actions > conventional_actions { + unpaid_actions = conventional_actions; + } + + let conventional_actions = conventional_actions as u32; + let unpaid_actions = unpaid_actions as u32; + + Self { + transaction, + miner_fee, + legacy_sigop_count, + conventional_actions, + unpaid_actions, + fee_weight_ratio, + } + }, + ) + .boxed() + } + type Strategy = BoxedStrategy; +} + // Utility functions /// Convert `trans` into a fake v5 transaction, diff --git a/zebra-chain/src/transaction/joinsplit.rs b/zebra-chain/src/transaction/joinsplit.rs index 0735bb6e3bc..80103b16e5e 100644 --- a/zebra-chain/src/transaction/joinsplit.rs +++ b/zebra-chain/src/transaction/joinsplit.rs @@ -4,6 +4,7 @@ use serde::{Deserialize, Serialize}; use crate::{ amount::{self, Amount, NegativeAllowed}, + fmt::HexDebug, primitives::{ed25519, ZkSnarkProof}, sprout::{self, JoinSplit, Nullifier}, }; @@ -16,7 +17,7 @@ use crate::{ /// description with the required signature data, so that an /// `Option` correctly models the presence or absence of any /// JoinSplit data. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct JoinSplitData { /// The first JoinSplit description in the transaction, /// using proofs of type `P`. @@ -48,6 +49,17 @@ pub struct JoinSplitData { pub sig: ed25519::Signature, } +impl fmt::Debug for JoinSplitData
{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("JoinSplitData") + .field("first", &self.first) + .field("rest", &self.rest) + .field("pub_key", &self.pub_key) + .field("sig", &HexDebug(&self.sig.to_bytes())) + .finish() + } +} + impl<P: ZkSnarkProof> fmt::Display for JoinSplitData<P>
{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut fmter = diff --git a/zebra-chain/src/transaction/serialize.rs b/zebra-chain/src/transaction/serialize.rs index da6a3770bf1..dd8a8c2e4c0 100644 --- a/zebra-chain/src/transaction/serialize.rs +++ b/zebra-chain/src/transaction/serialize.rs @@ -268,15 +268,15 @@ impl ZcashDeserialize for Option> { // Create shielded spends from deserialized parts let spends: Vec<_> = spend_prefixes .into_iter() - .zip(spend_proofs.into_iter()) - .zip(spend_sigs.into_iter()) + .zip(spend_proofs) + .zip(spend_sigs) .map(|((prefix, proof), sig)| Spend::::from_v5_parts(prefix, proof, sig)) .collect(); // Create shielded outputs from deserialized parts let outputs = output_prefixes .into_iter() - .zip(output_proofs.into_iter()) + .zip(output_proofs) .map(|(prefix, proof)| Output::from_v5_parts(prefix, proof)) .collect(); @@ -427,7 +427,7 @@ impl ZcashDeserialize for Option { // Create the AuthorizedAction from deserialized parts let authorized_actions: Vec = actions .into_iter() - .zip(sigs.into_iter()) + .zip(sigs) .map(|(action, spend_auth_sig)| { orchard::AuthorizedAction::from_parts(action, spend_auth_sig) }) @@ -884,9 +884,6 @@ impl ZcashDeserialize for Transaction { } // Denoted as `nConsensusBranchId` in the spec. // Convert it to a NetworkUpgrade - // - // Clippy 1.64 is wrong here, this lazy evaluation is necessary, constructors are functions. This is fixed in 1.66. - #[allow(clippy::unnecessary_lazy_evaluations)] let network_upgrade = NetworkUpgrade::from_branch_id(limited_reader.read_u32::()?) .ok_or_else(|| { @@ -1026,8 +1023,17 @@ impl fmt::Display for SerializedTransaction { impl fmt::Debug for SerializedTransaction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // A transaction with a lot of transfers can be extremely long in logs. + let mut data_truncated = hex::encode(&self.bytes); + if data_truncated.len() > 1003 { + let end = data_truncated.len() - 500; + // Replace the middle bytes with "...", but leave 500 bytes on either side. + // The data is hex, so this replacement won't panic. + data_truncated.replace_range(500..=end, "..."); + } + f.debug_tuple("SerializedTransaction") - .field(&hex::encode(&self.bytes)) + .field(&data_truncated) .finish() } } diff --git a/zebra-chain/src/transaction/unmined.rs b/zebra-chain/src/transaction/unmined.rs index 6b953966627..da716573e8b 100644 --- a/zebra-chain/src/transaction/unmined.rs +++ b/zebra-chain/src/transaction/unmined.rs @@ -325,7 +325,6 @@ impl From<&Arc> for UnminedTx { // // This struct can't be `Eq`, because it contains a `f32`. #[derive(Clone, PartialEq)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] pub struct VerifiedUnminedTx { /// The unmined transaction. pub transaction: UnminedTx, @@ -337,6 +336,13 @@ pub struct VerifiedUnminedTx { /// transparent inputs and outputs. pub legacy_sigop_count: u64, + /// The number of conventional actions for `transaction`, as defined by [ZIP-317]. + /// + /// The number of actions is limited by [`MAX_BLOCK_BYTES`], so it fits in a u32. + /// + /// [ZIP-317]: https://zips.z.cash/zip-0317#block-production + pub conventional_actions: u32, + /// The number of unpaid actions for `transaction`, /// as defined by [ZIP-317] for block production. 
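`conventional_actions` is now computed once in `VerifiedUnminedTx::new` and stored, because ZIP-317 block production needs it alongside `unpaid_actions`. As a reminder of the underlying arithmetic, here is a back-of-the-envelope sketch of the transparent component of the ZIP-317 conventional fee (constants from the ZIP; the full formula also adds the JoinSplit, Sapling, and Orchard action counts, and this is not Zebra's implementation):

```rust
const MARGINAL_FEE: u64 = 5_000; // zatoshis per logical action
const GRACE_ACTIONS: u64 = 2;
const P2PKH_STANDARD_INPUT_SIZE: u64 = 150;
const P2PKH_STANDARD_OUTPUT_SIZE: u64 = 34;

fn ceil_div(numerator: u64, divisor: u64) -> u64 {
    (numerator + divisor - 1) / divisor
}

fn transparent_conventional_fee(tx_in_total_size: u64, tx_out_total_size: u64) -> u64 {
    // Inputs and outputs are measured in standard P2PKH-sized units.
    let logical_actions = std::cmp::max(
        ceil_div(tx_in_total_size, P2PKH_STANDARD_INPUT_SIZE),
        ceil_div(tx_out_total_size, P2PKH_STANDARD_OUTPUT_SIZE),
    );
    MARGINAL_FEE * std::cmp::max(GRACE_ACTIONS, logical_actions)
}

fn main() {
    // A one-input, two-output transparent transaction stays in the grace window:
    assert_eq!(transparent_conventional_fee(150, 68), 10_000);
}
```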
/// @@ -381,6 +387,7 @@ impl VerifiedUnminedTx { legacy_sigop_count: u64, ) -> Result { let fee_weight_ratio = zip317::conventional_fee_weight_ratio(&transaction, miner_fee); + let conventional_actions = zip317::conventional_actions(&transaction.transaction); let unpaid_actions = zip317::unpaid_actions(&transaction, miner_fee); zip317::mempool_checks(unpaid_actions, miner_fee, transaction.size)?; @@ -390,6 +397,7 @@ impl VerifiedUnminedTx { miner_fee, legacy_sigop_count, fee_weight_ratio, + conventional_actions, unpaid_actions, }) } diff --git a/zebra-chain/src/transaction/unmined/zip317.rs b/zebra-chain/src/transaction/unmined/zip317.rs index 44ef709aacd..e9f4a757e53 100644 --- a/zebra-chain/src/transaction/unmined/zip317.rs +++ b/zebra-chain/src/transaction/unmined/zip317.rs @@ -133,7 +133,7 @@ pub fn conventional_fee_weight_ratio( /// as defined by [ZIP-317]. /// /// [ZIP-317]: https://zips.z.cash/zip-0317#fee-calculation -fn conventional_actions(transaction: &Transaction) -> u32 { +pub fn conventional_actions(transaction: &Transaction) -> u32 { let tx_in_total_size: usize = transaction .inputs() .iter() diff --git a/zebra-chain/src/transparent.rs b/zebra-chain/src/transparent.rs index afea036afb6..7982468288c 100644 --- a/zebra-chain/src/transparent.rs +++ b/zebra-chain/src/transparent.rs @@ -66,7 +66,10 @@ pub const EXTRA_ZEBRA_COINBASE_DATA: &str = "z\u{1F993}"; // // TODO: rename to ExtraCoinbaseData, because height is also part of the coinbase data? #[derive(Clone, Eq, PartialEq)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))] +#[cfg_attr( + any(test, feature = "proptest-impl", feature = "elasticsearch"), + derive(Serialize) +)] pub struct CoinbaseData( /// Invariant: this vec, together with the coinbase height, must be less than /// 100 bytes. We enforce this by only constructing CoinbaseData fields by @@ -110,7 +113,11 @@ impl std::fmt::Debug for CoinbaseData { /// /// A particular transaction output reference. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary, Serialize))] +#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] +#[cfg_attr( + any(test, feature = "proptest-impl", feature = "elasticsearch"), + derive(Serialize) +)] pub struct OutPoint { /// References the transaction that contains the UTXO being spent. /// @@ -145,7 +152,10 @@ impl OutPoint { /// A transparent input to a transaction. #[derive(Clone, Debug, Eq, PartialEq)] -#[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))] +#[cfg_attr( + any(test, feature = "proptest-impl", feature = "elasticsearch"), + derive(Serialize) +)] pub enum Input { /// A reference to an output of a previous transaction. PrevOut { @@ -383,9 +393,10 @@ impl Input { /// that spends my UTXO and sends 1 ZEC to you and 1 ZEC back to me /// (just like receiving change). #[derive(Clone, Debug, Eq, PartialEq, Hash)] +#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary, Deserialize))] #[cfg_attr( - any(test, feature = "proptest-impl"), - derive(Arbitrary, Serialize, Deserialize) + any(test, feature = "proptest-impl", feature = "elasticsearch"), + derive(Serialize) )] pub struct Output { /// Transaction value. 
diff --git a/zebra-chain/src/transparent/address.rs b/zebra-chain/src/transparent/address.rs index 01b78320cc2..0faeb9216fb 100644 --- a/zebra-chain/src/transparent/address.rs +++ b/zebra-chain/src/transparent/address.rs @@ -4,7 +4,6 @@ use std::{fmt, io}; use ripemd::{Digest, Ripemd160}; use secp256k1::PublicKey; -use sha2::Digest as Sha256Digest; use sha2::Sha256; use crate::{ diff --git a/zebra-chain/src/value_balance/tests/prop.rs b/zebra-chain/src/value_balance/tests/prop.rs index 9ee2fb1e634..248824157d0 100644 --- a/zebra-chain/src/value_balance/tests/prop.rs +++ b/zebra-chain/src/value_balance/tests/prop.rs @@ -79,7 +79,7 @@ proptest! { ) { let _init_guard = zebra_test::init(); - let collection = vec![value_balance1, value_balance2]; + let collection = [value_balance1, value_balance2]; let transparent = value_balance1.transparent + value_balance2.transparent; let sprout = value_balance1.sprout + value_balance2.sprout; diff --git a/zebra-chain/src/work/difficulty/tests/vectors.rs b/zebra-chain/src/work/difficulty/tests/vectors.rs index 3a64cdec6f1..d198fd32c4b 100644 --- a/zebra-chain/src/work/difficulty/tests/vectors.rs +++ b/zebra-chain/src/work/difficulty/tests/vectors.rs @@ -457,7 +457,9 @@ fn check_testnet_minimum_difficulty_block(height: block::Height) -> Result<(), R .signed_duration_since(previous_block.header.time); // zcashd requires a gap that's strictly greater than 6 times the target - // threshold, but ZIP-205 and ZIP-208 are ambiguous. See bug #1276. + // threshold, as documented in ZIP-205 and ZIP-208: + // https://zips.z.cash/zip-0205#change-to-difficulty-adjustment-on-testnet + // https://zips.z.cash/zip-0208#minimum-difficulty-blocks-on-testnet match NetworkUpgrade::minimum_difficulty_spacing_for_height(Network::Testnet, height) { None => Err(eyre!("the minimum difficulty rule is not active"))?, Some(spacing) if (time_gap <= spacing) => Err(eyre!( diff --git a/zebra-chain/src/work/equihash.rs b/zebra-chain/src/work/equihash.rs index 731d9497afd..e8b73b1614a 100644 --- a/zebra-chain/src/work/equihash.rs +++ b/zebra-chain/src/work/equihash.rs @@ -87,9 +87,7 @@ impl Copy for Solution {} impl Clone for Solution { fn clone(&self) -> Self { - let mut bytes = [0; SOLUTION_SIZE]; - bytes[..].copy_from_slice(&self.0[..]); - Self(bytes) + *self } } diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 885b3f82554..25024287e1c 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,10 +1,19 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] +description = "Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["asynchronous", "cryptography::cryptocurrencies"] + [features] default = [] @@ -31,41 +40,41 @@ bellman = "0.14.0" bls12_381 = "0.8.0" halo2 = { package = "halo2_proofs", version = "0.3.0" } jubjub = "0.10.0" -rand = { version = "0.8.5", package = "rand" } +rand = "0.8.5" rayon = "1.7.0" -chrono = { version = "0.4.24", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } displaydoc = "0.2.4" lazy_static = "1.4.0" -once_cell = "1.17.1" -serde = { version = "1.0.163", features = ["serde_derive"] } 
+once_cell = "1.18.0" +serde = { version = "1.0.179", features = ["serde_derive"] } futures = "0.3.28" futures-util = "0.3.28" -metrics = "0.21.0" -thiserror = "1.0.40" -tokio = { version = "1.28.0", features = ["time", "sync", "tracing", "rt-multi-thread"] } +metrics = "0.21.1" +thiserror = "1.0.44" +tokio = { version = "1.29.1", features = ["time", "sync", "tracing", "rt-multi-thread"] } tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } tracing = "0.1.37" tracing-futures = "0.2.5" -orchard = "0.4.0" +orchard = "0.5.0" -zcash_proofs = { version = "0.11.0", features = ["local-prover", "multicore", "download-params"] } +zcash_proofs = { version = "0.12.1", features = ["local-prover", "multicore", "download-params"] } -tower-fallback = { path = "../tower-fallback/" } -tower-batch = { path = "../tower-batch/" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.4" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.4" } -zebra-script = { path = "../zebra-script" } -zebra-state = { path = "../zebra-state" } -zebra-node-services = { path = "../zebra-node-services" } -zebra-chain = { path = "../zebra-chain" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.28" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.28" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.28" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # Test-only dependencies -proptest = { version = "1.1.0", optional = true } +proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } [dev-dependencies] @@ -76,11 +85,11 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } hex = "0.4.3" num-integer = "0.1.45" -proptest = "1.1.0" +proptest = "1.2.0" proptest-derive = "0.3.0" spandoc = "0.2.2" -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } tracing-error = "0.2.0" tracing-subscriber = "0.3.17" diff --git a/zebra-consensus/src/block.rs b/zebra-consensus/src/block.rs index 80d4bea9c59..970cf4118aa 100644 --- a/zebra-consensus/src/block.rs +++ b/zebra-consensus/src/block.rs @@ -35,9 +35,9 @@ pub use request::Request; #[cfg(test)] mod tests; -/// Asynchronous block verification. +/// Asynchronous semantic block verification. #[derive(Debug)] -pub struct BlockVerifier { +pub struct SemanticBlockVerifier { /// The network to be verified. network: Network, state_service: S, @@ -100,14 +100,14 @@ impl VerifyBlockError { /// pub const MAX_BLOCK_SIGOPS: u64 = 20_000; -impl BlockVerifier +impl SemanticBlockVerifier where S: Service + Send + Clone + 'static, S::Future: Send + 'static, V: Service + Send + Clone + 'static, V::Future: Send + 'static, { - /// Creates a new BlockVerifier + /// Creates a new SemanticBlockVerifier pub fn new(network: Network, state_service: S, transaction_verifier: V) -> Self { Self { network, @@ -117,7 +117,7 @@ where } } -impl Service for BlockVerifier +impl Service for SemanticBlockVerifier where S: Service + Send + Clone + 'static, S::Future: Send + 'static, @@ -280,10 +280,10 @@ where check::miner_fees_are_valid(&block, network, block_miner_fees)?; // Finally, submit the block for contextual verification. 
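The verifier's calls into the state service follow the standard tower discipline: every `poll_ready`/`ready()` is paired with exactly one `call`, and the response is matched against the single expected variant. A skeletal sketch of that pattern (`Request`/`Response` are placeholders, not Zebra's types):

```rust
use tower::{Service, ServiceExt};

struct Request;
struct Response;

async fn commit<S>(mut state: S, request: Request) -> Result<Response, S::Error>
where
    S: Service<Request, Response = Response>,
{
    // `ready()` reserves capacity in the service; the matching `call`
    // consumes that reservation, so the pair must stay together.
    state.ready().await?.call(request).await
}
```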
- let new_outputs = Arc::try_unwrap(known_utxos) + let new_outputs = Arc::into_inner(known_utxos) .expect("all verification tasks using known_utxos are complete"); - let prepared_block = zs::PreparedBlock { + let prepared_block = zs::SemanticallyVerifiedBlock { block, hash, height, @@ -311,7 +311,7 @@ where .ready() .await .map_err(VerifyBlockError::Commit)? - .call(zs::Request::CommitBlock(prepared_block)) + .call(zs::Request::CommitSemanticallyVerifiedBlock(prepared_block)) .await .map_err(VerifyBlockError::Commit)? { @@ -319,7 +319,7 @@ where assert_eq!(committed_hash, hash, "state must commit correct hash"); Ok(hash) } - _ => unreachable!("wrong response for CommitBlock"), + _ => unreachable!("wrong response for CommitSemanticallyVerifiedBlock"), } } .instrument(span) diff --git a/zebra-consensus/src/block/check.rs b/zebra-consensus/src/block/check.rs index ddd3dbefa63..5f4aaa6ced6 100644 --- a/zebra-consensus/src/block/check.rs +++ b/zebra-consensus/src/block/check.rs @@ -315,7 +315,7 @@ pub fn merkle_root_validity( // // Duplicate transactions should cause a block to be // rejected, as duplicate transactions imply that the block contains a - // double-spend. As a defense-in-depth, however, we also check that there + // double-spend. As a defense-in-depth, however, we also check that there // are no duplicate transaction hashes. // // ## Checkpoint Validation diff --git a/zebra-consensus/src/block/tests.rs b/zebra-consensus/src/block/tests.rs index bad6ab40630..13e8be79cf0 100644 --- a/zebra-consensus/src/block/tests.rs +++ b/zebra-consensus/src/block/tests.rs @@ -144,7 +144,7 @@ async fn check_transcripts() -> Result<(), Report> { let transaction = transaction::Verifier::new(network, state_service.clone()); let transaction = Buffer::new(BoxService::new(transaction), 1); let block_verifier = Buffer::new( - BlockVerifier::new(network, state_service.clone(), transaction), + SemanticBlockVerifier::new(network, state_service.clone(), transaction), 1, ); diff --git a/zebra-consensus/src/checkpoint.rs b/zebra-consensus/src/checkpoint.rs index ce31ac29a77..bcd49187764 100644 --- a/zebra-consensus/src/checkpoint.rs +++ b/zebra-consensus/src/checkpoint.rs @@ -4,11 +4,11 @@ //! speed up the initial chain sync for Zebra. This list is distributed //! with Zebra. //! -//! The checkpoint verifier queues pending blocks. Once there is a +//! The checkpoint verifier queues pending blocks. Once there is a //! chain from the previous checkpoint to a target checkpoint, it //! verifies all the blocks in that chain, and sends accepted blocks to -//! the state service as finalized chain state, skipping contextual -//! verification checks. +//! the state service as finalized chain state, skipping the majority of +//! contextual verification checks. //! //! Verification starts at the first checkpoint, which is the genesis //! block for the configured network. @@ -32,7 +32,7 @@ use zebra_chain::{ parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH}, work::equihash, }; -use zebra_state::{self as zs, FinalizedBlock}; +use zebra_state::{self as zs, CheckpointVerifiedBlock}; use crate::{ block::VerifyBlockError, @@ -59,7 +59,7 @@ pub use list::CheckpointList; #[derive(Debug)] struct QueuedBlock { /// The block, with additional precalculated data. - block: FinalizedBlock, + block: CheckpointVerifiedBlock, /// The transmitting end of the oneshot channel for this block's result. 
tx: oneshot::Sender>, } @@ -68,7 +68,7 @@ struct QueuedBlock { #[derive(Debug)] struct RequestBlock { /// The block, with additional precalculated data. - block: FinalizedBlock, + block: CheckpointVerifiedBlock, /// The receiving end of the oneshot channel for this block's result. rx: oneshot::Receiver>, } @@ -265,10 +265,11 @@ where let (sender, receiver) = mpsc::channel(); #[cfg(feature = "progress-bar")] - let queued_blocks_bar = howudoin::new().label("Queued Checkpoint Blocks"); + let queued_blocks_bar = howudoin::new_root().label("Checkpoint Queue Height"); #[cfg(feature = "progress-bar")] - let verified_checkpoint_bar = howudoin::new().label("Verified Checkpoints"); + let verified_checkpoint_bar = + howudoin::new_with_parent(queued_blocks_bar.id()).label("Verified Checkpoints"); let verifier = CheckpointVerifier { checkpoint_list, @@ -580,7 +581,7 @@ where /// Check that the block height, proof of work, and Merkle root are valid. /// - /// Returns a [`FinalizedBlock`] with precalculated block data. + /// Returns a [`CheckpointVerifiedBlock`] with precalculated block data. /// /// ## Security /// @@ -590,7 +591,10 @@ where /// Checking the Merkle root ensures that the block hash binds the block /// contents. To prevent malleability (CVE-2012-2459), we also need to check /// whether the transaction hashes are unique. - fn check_block(&self, block: Arc) -> Result { + fn check_block( + &self, + block: Arc, + ) -> Result { let hash = block.hash(); let height = block .coinbase_height() @@ -601,7 +605,7 @@ where crate::block::check::equihash_solution_is_valid(&block.header)?; // don't do precalculation until the block passes basic difficulty checks - let block = FinalizedBlock::with_hash(block, hash); + let block = CheckpointVerifiedBlock::with_hash(block, hash); crate::block::check::merkle_root_validity( self.network, @@ -967,7 +971,7 @@ pub enum VerifyCheckpointError { #[error("checkpoint verifier was dropped")] Dropped, #[error(transparent)] - CommitFinalized(BoxError), + CommitCheckpointVerified(BoxError), #[error(transparent)] Tip(BoxError), #[error(transparent)] @@ -1081,36 +1085,36 @@ where // we don't reject the entire checkpoint. // Instead, we reset the verifier to the successfully committed state tip. let state_service = self.state_service.clone(); - let commit_finalized_block = tokio::spawn(async move { + let commit_checkpoint_verified = tokio::spawn(async move { let hash = req_block .rx .await .map_err(Into::into) - .map_err(VerifyCheckpointError::CommitFinalized) + .map_err(VerifyCheckpointError::CommitCheckpointVerified) .expect("CheckpointVerifier does not leave dangling receivers")?; // We use a `ServiceExt::oneshot`, so that every state service // `poll_ready` has a corresponding `call`. See #1593. match state_service - .oneshot(zs::Request::CommitFinalizedBlock(req_block.block)) - .map_err(VerifyCheckpointError::CommitFinalized) + .oneshot(zs::Request::CommitCheckpointVerifiedBlock(req_block.block)) + .map_err(VerifyCheckpointError::CommitCheckpointVerified) .await? 
{ zs::Response::Committed(committed_hash) => { assert_eq!(committed_hash, hash, "state must commit correct hash"); Ok(hash) } - _ => unreachable!("wrong response for CommitFinalizedBlock"), + _ => unreachable!("wrong response for CommitCheckpointVerifiedBlock"), } }); let state_service = self.state_service.clone(); let reset_sender = self.reset_sender.clone(); async move { - let result = commit_finalized_block.await; + let result = commit_checkpoint_verified.await; // Avoid a panic on shutdown // - // When `zebrad` is terminated using Ctrl-C, the `commit_finalized_block` task + // When `zebrad` is terminated using Ctrl-C, the `commit_checkpoint_verified` task // can return a `JoinError::Cancelled`. We expect task cancellation on shutdown, // so we don't need to panic here. The persistent state is correct even when the // task is cancelled, because block data is committed inside transactions, in @@ -1118,7 +1122,7 @@ where let result = if zebra_chain::shutdown::is_shutting_down() { Err(VerifyCheckpointError::ShuttingDown) } else { - result.expect("commit_finalized_block should not panic") + result.expect("commit_checkpoint_verified should not panic") }; if result.is_err() { // If there was an error committing the block, then this verifier diff --git a/zebra-consensus/src/checkpoint/list/tests.rs b/zebra-consensus/src/checkpoint/list/tests.rs index 9ad1febeb7a..da07c689464 100644 --- a/zebra-consensus/src/checkpoint/list/tests.rs +++ b/zebra-consensus/src/checkpoint/list/tests.rs @@ -103,7 +103,7 @@ fn checkpoint_list_no_genesis_fail() -> Result<(), BoxError> { fn checkpoint_list_null_hash_fail() -> Result<(), BoxError> { let _init_guard = zebra_test::init(); - let checkpoint_data = vec![(block::Height(0), block::Hash([0; 32]))]; + let checkpoint_data = [(block::Height(0), block::Hash([0; 32]))]; // Make a checkpoint list containing the non-genesis block let checkpoint_list: BTreeMap = @@ -119,7 +119,7 @@ fn checkpoint_list_null_hash_fail() -> Result<(), BoxError> { fn checkpoint_list_bad_height_fail() -> Result<(), BoxError> { let _init_guard = zebra_test::init(); - let checkpoint_data = vec![( + let checkpoint_data = [( block::Height(block::Height::MAX.0 + 1), block::Hash([1; 32]), )]; @@ -131,7 +131,7 @@ fn checkpoint_list_bad_height_fail() -> Result<(), BoxError> { "a checkpoint list with an invalid block height (block::Height::MAX + 1) should fail", ); - let checkpoint_data = vec![(block::Height(u32::MAX), block::Hash([1; 32]))]; + let checkpoint_data = [(block::Height(u32::MAX), block::Hash([1; 32]))]; // Make a checkpoint list containing the non-genesis block let checkpoint_list: BTreeMap = diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index 5cc13e1d3b5..dd8e688feb3 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -10667,3 +10667,348 @@ 2074108 00000000015062ecafe1e087ddc8cdca74c6fcabb7552b36b784c8e05020be80 2074332 0000000001830c15b8c76ef48cb08fe3fc5f362a6015f3bdf847933446c5a42f 2074713 0000000000b6a6f7e35eacfa84a205e2ac86da55b518d11134917f9da0778faf +2075113 0000000001a99f5cbd00ee510fbbdbbe7f72c694350045e66fbb79c7bd0af17f +2075355 000000000190bf4d0a14a1d653f16e44ed4dbbf317cc5aede4a7c45a42eddb11 +2075591 0000000000e2ac282b74d35eec430a46f159415dcee10c5baffc791410347e57 +2075830 000000000092281422f9bfa7c8a76d4d9a1f7673e7fb02631a5c4e7e911e7dfa +2076040 000000000019993f0bdb9370fe9712b77004682cb6c7c55821a1b39dea2f5a8b +2076239 
000000000183d8c7e5a83c6645d9a8b6121192902fc68277e256dbeec175799f +2076465 000000000045ee10334a4f89e0c94f739b2b9e01f42f0fc3cd8e83583dd4eacb +2076621 00000000014975b2eeef5ddfeb41e4f35b703ed8e62817c709a2a12084dfc182 +2076909 0000000000ac0d6bb6b12c413e98554e67e08255548c2da1182700321336a4ff +2077156 00000000010dbf97b3209b05eabc9f01f089bd97d48622b36773b4c331382ed1 +2077291 00000000007d51fb3e413ddd36719e650ec4a1c727a30aa5380ad8af810d6654 +2077422 0000000000625df0e029adbd29103627b9425e770bc95fd5fc7bc0e1c2670b71 +2077550 00000000001bcba0aaa8a9587f3b97f5c387241dc10fb1b518b629b3ba9c6f5d +2077660 0000000000a6c3a14f93012e5106bde5120ea3a87eb233c5ffc277d0c5cbf0f3 +2077775 0000000001c2f9bec36c0903d8aebb10aee022f6c038827b0989c12c03b308c1 +2077898 0000000001282ebbff0a872d53231cebc08b43f82cf49568fbdb33996a6bd0d2 +2078041 000000000102cf8ca8bebd5a0c211369ab15a9bfc487c9cb5463a5beb8a7dbb4 +2078158 00000000005c84fae7637880208b1a5e56fe90010e1bbf8077de8a6805221610 +2078284 00000000013e02d5696bf80ebd6953a9d0b890d6ced8b5c7034c15016f5abd2b +2078402 0000000000737730069b08e3d1f59006e754045a0e2a643516930e7a12bb3601 +2078499 0000000000c1c6c6d7dadbfde9b3d52151aba05bf6053e1eb0ef4daa41ebe4b1 +2078604 0000000000bbc8474aabe166b367f053ab3ccefaf1c9f922e747c6e20deea7d4 +2078703 0000000000193a5c667ae00e51465ddefd6ad7ec9130db8d9909ba27bd88c5cf +2078813 000000000127c5c3016530ee95d7fbd826580600c1fd50ffb71190dc60e3572e +2078943 000000000133ff5ea8249786e58b9aa1f4eb4fee0abf8552e0f670396622710b +2079111 000000000019ed9cfc533b0b8307b2b69974b602afbcb9c458aea978bddc3a92 +2079277 000000000076658b08cea75506e7b3302a5b99cdefbb76292fa28fc67b98b52b +2079380 00000000015a2a71549a3fe6d658cc62224de854b98b54bc6a195dae41998b10 +2079523 00000000002aca3a883235acd879b2404f37528f5957d803e4f762257f0c20a5 +2079662 0000000001d5971debe4b032b9949b8e52aa35360c089011f9b5bb4871114a63 +2079807 00000000005854f7d4ce31683f79b8612b4506990d4a03f7c5fd11f3abd0269b +2079991 00000000012744f6be1d5d352630ad60bd06b7ac303313899da55dfe4b3d92dd +2080129 0000000001ed04450f257bc8b7c95629659688c6f36dfd9185f5d67715526f9b +2080309 0000000000ad74ed21dbe8a0807729759e4c0170f6740fea30fe7d0232e3311b +2080709 0000000000e0fee4e08e761d52a6d25c36763fc1788d809b008c4286feadc0c4 +2081029 00000000014ebb5c2cd581f8b3eebacea0d75f7659db2801e9c74dc2aafa2749 +2081185 000000000142344e494c8686e0828ca763841492a56d09f93b265c18db8eee31 +2081297 00000000017718db0074dd9903f4fb1e5a79ea81f7bca5dc55c8e9daa7a19401 +2081404 00000000003b81dd192a5ed2dcff69469bd1902b6a77edda77ae174a6f31c23e +2081514 00000000013f483f1a79d515beee205bf821ad36fecd4642e9627ea4b54fb14a +2081636 000000000054ec8a49b0b2b3638fbe5363cd62a4fceda2519d8b775580a106f7 +2081745 00000000019dd976a602f82cf79d2252f741836ef65de94d19817ceda3a9b046 +2081883 0000000000167f3797a557e26240e5ed10ff574d45d88d27bec44189e1f4a4f0 +2082013 00000000010dafd4f31b09d4f70a38e76fc1a40a67497ab4b0af35139bb972a2 +2082139 000000000180fa80db5c37e49ac748dda9adc81e97004eed1c67d898de2f408c +2082272 000000000093a93c48fbb4759f494599805206dd7db1e6cc483928e156b0c985 +2082403 000000000109103341c173547c6a6696025ed8d3b6d5c3b81e0fff73f62ddce2 +2082554 0000000001287ae5df7f6ba6497325863a956fd9482c3811d85e684f6b785cda +2082686 0000000000628e50012d4582a199a35ea7b13730ee315e327eb1548e1aeffcc4 +2082845 0000000000fea04ea60477b21314b0dbceefec88c269b968adf5fb6a68e69179 +2082969 0000000001cfadc95ac815fa8d4dc3210e844584744e69122a566f84155847ae +2083104 00000000013e581f16c4715f39d43f61c5db974664ee2cb1437134c9238a0b22 +2083229 000000000085e08c8f8a33889a7a6fb3a48aec7c2955b435e6d74df6f4d9caf3 +2083332 
000000000136e02aa8e0d6bd970c9a3b7ef0cfdf3dc8bcc3ecfb32a5ecedd03f +2083435 000000000185e9c7af88403485a12a760574f3ae0479c671219f5a8c2dd1fbea +2083573 00000000012602f9a5270af537e0aad4fd955c08116ba65ae01071cf23e5b743 +2083706 0000000000b425d837a7091c4c94b16c8f634fe1f8616c6dbcc32605879c39a6 +2083811 0000000000acf4ccbfd1649a8fedd5cb5bc3ff3e84c4f6dd63782fd72d161990 +2083941 00000000009c2a22fe12fc1c7a37d47ed7c63753e1b492c82562cc7f62d7b15b +2084074 00000000003ecf1222c6b8786dfb7ebd9bcee5110fd84d02823f2ea7839db06a +2084195 0000000000c1c49ae460a1a6a06b6a7f76f7f7c49a98752f3ad2d52002e3349b +2084326 0000000000b9faee1b460f0053868ed7b7444566943780ffea4aedba153c7bfa +2084432 00000000014399d32f8b4c918e0a27ab2d9adc8e59541c0faf6f26e7e04304f7 +2084558 0000000000391f7fc515fd02df4ad7a9b87038ebf9a2f7838bfa4f1501cff24d +2084695 00000000006c2526054bcd1e3d770d5175b565f964ebcac3b6cda84495905a23 +2084839 00000000009a17158b0a1ed2b8301dc41fd0d3f79f7b248685e606383dd2d746 +2084965 000000000083a29f9b23c35799d15f31879fc3e7c4e14f99d971ed0ae47a4318 +2085087 000000000174e61ccb20f8b1a2e0cbc14e4ea4b7355daa4257ae77a90b9b3824 +2085222 0000000000ff69aee2b7e4ab0ce84a78220c2e238ae4cd4b7c7fe80b42d78a91 +2085302 0000000000a3c808c1bd1a2ce000bb2dae84ccf2bf61f7efb316cc8df74e14d8 +2085413 0000000001a411687105ec5e6df08a408094b6efeccdad9637d05f559705a750 +2085577 000000000036144af8d64b6eda6f7b6c3a0e072bfc7414a0fec80f74346a30e0 +2085719 0000000000f5e597f032c5c4c314bea90b5a615fedcd343a66d9b190c4b30728 +2085880 0000000000c2fd91ea1a788befa956d96191122f52ca7dd40e112b7fc1d368b7 +2086000 00000000010f5c6fb2c3a27c4c89f1d3c80ffb9478d5138dbf89d8c46705ab44 +2086114 0000000000e0609fe931e181ddfea66d5a156ab443fc9ce0c0323cd8a73c1715 +2086279 0000000000801ee3c97e79cfb0328673bb402927b199059cc01fc9078aee3a02 +2086411 00000000005825160e58a707808118d1c53c3b5fb1a7b23c4a71b587e77218ba +2086539 0000000000a6b8b89ce3753e5426912a3302b5f0815333eb34ac13f90892d9ca +2086652 000000000032c4aee2b456017f5c44ece27dfe6f82ce793d6a2c8e6cf9705181 +2086755 00000000008b381f7aa4911d580c7e9925f9547b5b035e49fb3b6052c3ffd208 +2086905 0000000001052632e3bb2bc5c1fb399b99ee42d0204d31bf42fec607705d6b1b +2087034 0000000001474b9758fab2f332507c27190707d4dd25a22ddb868cc46ae654fa +2087133 000000000111733ff1eb1398e0fc424ee0a790f48f4255ead39921f67aa6784c +2087243 000000000064babe446f9f017692c4014f57e7fe7ac3af54d8e0bb080b601a06 +2087398 00000000004a5564c138e812b337ca04688a8cdb986e605c625a68397cffe782 +2087520 0000000000658ce81447a526e4ab0cb86fe290d5c91b438b5045fe296dc2fc2b +2087648 0000000001912acf62fdb254caec219475bf19de2968530d6bae14e5abd6e132 +2087786 0000000000a776a8153c5d8d5df0c46f3dbdb06e0842176845cd189303cd0634 +2087891 00000000002ccb4b12628cbff1b098dc97067419dc4e9d6fb080a0332c853c46 +2088044 0000000000f6a45be7b62776431b2d9b8af4fc3ac41777d8e6641ef6c91a56f2 +2088195 000000000002a33003d8425848a941e31f271f52175297ce7b620ccc3e59d1bd +2088324 0000000001417d7d06bdbe473761c8682604892e59977e892e729a46ddc0ab46 +2088468 0000000000fbb79d003db0a0b4daf4b3e58127f2940565b03d05544bcfc9c922 +2088594 00000000013bac8323b8061d16f665010dfe67c3ec9f468cb9e61580e04b79c7 +2088726 0000000000fb4583ce7b2175a27ccb7e4aed7d79d81708cdb33c3a5b65c869e0 +2088848 0000000001958630b3397d77b058a659e47ef03a6c30c548f3817f618d6274ec +2089011 000000000111f5a56df6d45411256d7cc54ff69ff3d9e66b6c99b5980143fea8 +2089136 0000000000995a0fd722fe9521b641914cd42b87a0b3114856025514405c465b +2089290 00000000014f205ee8c866b48777bd5b8423161143c3304eb32680942a87bb46 +2089444 00000000005aade414a8e456d9726c6537136eff19fda583108fc6621244e0fe +2089551 
0000000000e9c3ee19aa6145ba94c3f59e3c0396ee621baf6129b3528f539c12 +2089667 0000000000b1ea2da31d5d8eb8a18c621f6bc2c79fe95ecde1964af19c2b95eb +2089795 00000000006c9c29a3ebbbc0a88bed7e8a292fd004dbb4d8a31a6e7f48d3a881 +2089921 00000000001e842aea64af9109cc5fccde5452ceeaa01ce0d931ef43ff3c04f0 +2090048 000000000189b07ece1910e7cd1df564cad6d198b90bdde937ba8d0d8c0fb0cc +2090198 00000000001ca7557b6317bc1424714d64dedf382527ce5c2e522e063a773091 +2090321 000000000081e3efad4e1a4ca570adff25257c91504e1446a85601e97ed07b14 +2090491 000000000057c842f1b7c2872a23dd2b7a070980184f566d55614d89edf48b3c +2090619 00000000001d01eec1878379670550483e7b79c139f22d08179f09cf5e6f4be6 +2090714 00000000014056f035e3d7f93a92e2e761b8c39f155dbb965307e6ed9bb170e5 +2090850 000000000145aff813301f53126ab43e12322a39de94ef6b5c6415da041f3758 +2090979 000000000170349ba82f090f52557c75bc9af3194acd01b51fbf9c06b44f030c +2091126 0000000000c01ce110634c6e639a31a685453090b8068bb89368f2bb9eca1120 +2091251 0000000000d1c7eb27f2827306534733eb99f176a0c9d5dd438cd23938ef680c +2091367 000000000121421e33b4f6f7deb178eb367a88a42fa4cb128bebb5bcc8b53827 +2091494 000000000090ee533f3170cced652435525a92461da39f8a48e3109699fcc664 +2091642 0000000001741cb8f3fe51bc7d128cd54f796b4b5190872a230b2d5317675603 +2091743 000000000155855fa11342133b6fcfd5d9994739f456559b846ea5e19f4035de +2091862 0000000000b97d6804bc58ca8aadf678614f5a05885ff992ec39fdbfa500cbcf +2091969 000000000026c91a63f379118acd5b3b9ef4e5b63732d28ae7e4374a9efa539b +2092094 000000000005ed59479c82647e54020ede65c39a1fc8644c76c2ad47906bbec9 +2092243 00000000016d798087735a6adcfc45f1b453c99f3e815287b3eddde288a6f47f +2092354 0000000000df2e13976590ca0de726838139c3a4d26decea868797ceff8925b0 +2092442 00000000019fa546714a01c2c98cac576ede6c9b7245bfc06e30aa79132097e5 +2092577 00000000006fc88b68bcb75e2b9183cb65604d0297d095fcc5d1580d8696d26a +2092727 00000000003745c16340b006689f9f9beb68c5f05d06ab041a866205a1da0b99 +2092830 000000000109c548e333cc58a86278cf7a1e6fbdb956ce0077a250a9f8706b90 +2092947 000000000135e5a70a02cf995623c7e529fff35400dde4eddf5797c9a3aee732 +2093093 0000000000f9458e958ca596df0af857057e02b9c498055de0aa4ad531bf9b16 +2093209 000000000025246750e066373b65649a45b11304a18520f63c52b5db520eb063 +2093353 00000000010cef5c3ba1f1943596ce9e0a88937f0425b1c569e618133b695b4c +2093475 000000000127a92e27e42a379870f5ec6eef37272523537436ee50d284df35ba +2093599 0000000000f7478cad724e166935715a65536587b651445bd63185ba9f03681d +2093704 00000000001f605497e26424f85c9d0e7fe50d448bbbded57893536b137dc810 +2093847 00000000007a2b5cfa3d3b75d83a2e088706327dadcda59e7dbe7f461e1db70f +2093984 000000000147c1590105c0b33e4c5acb5a6c0e3cd0b97b4290578b641feb64de +2094130 0000000000e62e23345cbe34b2e8f9e670b1867512a363afed387ac44b25ca0c +2094272 0000000001081abca01a5593db3a73d7c15c34bf80b8c24a4f6c22a6c585c2d5 +2094400 00000000001d0e52398c31bd5e65306c4c98450633423e47a05657cb19970a97 +2094602 0000000001bb52a477ba7a6dcaa5b69edf249b193cfffb48dfbc7d58b5d81a3f +2094832 00000000019cb9d5f6dbd810a4024a519310fb5245fe571efcb1636411db050a +2095050 00000000012b4df2ac3c737227560b507437049fd3cf54f460f912e452ba7881 +2095276 00000000007d87573801bb0a1ebb0b62bcfa41920a48da68f36af0dd73163cd4 +2095507 000000000100405f5523ac56768cb001b7f1150f06b5b776a7e6fc5aae6b5f35 +2095768 00000000010c7c5d98a49d0ffc78309f461732b688a6f8f38883d54b5715eff2 +2095996 0000000000f9c91ffc58ba89159128a057ba0bd42c17416926a17f66529cabea +2096252 00000000015aa2059a7f4d8ae0d0e2c7830c887a46d2e888a28c4725d18e89ff +2096467 000000000098c095d51fb23327f2eef50af0ccd992c7c22fe82ad5fb494141f4 +2096660 
0000000000d2fa4dbd07c6bf86768a7862d54b7dc9dd763ce3db9654e0eedef6 +2096903 0000000000e8fc62e98ad1c279523f04a55b8ad432c69bf2994e247e28aa551f +2097179 00000000010f68b4ca057a42199459d1bf7462c7d0e84faec16416081b3f02f8 +2097435 0000000000547ee4b69300de21a1ecba0b9c9a31f6d49c8e574cf549c41df924 +2097686 00000000005606ecf6d0f8a76f0c49b909cf8dc21092146501303ab1e8bedfae +2097908 00000000013e314a2da8a5c37fad8f66bb93549ee723fe64d5d2cd1789068a27 +2098115 0000000000619d8ebebc9c5e314ef50ed424680f72640393ae4cddb06787cbb5 +2098356 00000000006a5bd6a2cf39d29936a2fc3f24ef0b96ab6a3bf34fb30e0bb0fca1 +2098639 00000000016cbe6be35a7e51a5f7af4f61ddcac80cc091a220fc367e3383410c +2098824 000000000159d8583b4e5bb535705bf5bc87fa9c945aab9869c0a4c3b3855e71 +2099031 0000000000b2f71cff88dcb2733aed5295b8df299493ff376ab0c208a667c7ef +2099206 00000000004c580fa34990eef81ea528e76e2edcab313ee817afd14428c99124 +2099443 00000000013ee542bf98e6a5bbfdaefc63e80cc2f352d369f0b8c1c421b9f503 +2099688 0000000000bfdc4c16a54ac414edb19d0ff954a4e660e085aaf2c3ee04752ba2 +2099909 00000000008d1bb1394d7eb061d24d09313535c5390835faf8736c29413db3c9 +2100132 00000000008d8bc2ba2bab2ab1ec144ea90ae6eea9fc53e2625be5c2a2f31af7 +2100338 0000000000b7182364fab1c4d886c7320f760727fcd6bdc3579ec89c2bfdcae3 +2100563 0000000001709e775eb92dc1bb3947b43a2cebd6a1aa1f8361ca9171ee7e568b +2100799 00000000007a830c89fc58233dd9dcd920178e18e7ecefb7b7c41abad6ef63b3 +2101027 0000000000ba306e1b028fc7e6c9b596e5aea4c0e03deb5a261fd285b7807c1d +2101249 000000000147292eb7a34e69d7ada2434d17acf883edb97b480d8131a67e6695 +2101488 0000000000bb967770e76aa56783ecf9811be8509cee1185fe5e3ce459c430c3 +2101706 000000000064a94c632d64928781a822de69bca875fb2d728af2b8c6d76de035 +2102003 000000000006729c11fbd2b36cf45cef154b5be993271d13164d0535a6b28084 +2102282 00000000016a24b3ecd9a5645768ab60cacd3dba95ed66c969823cf7e064f644 +2102557 0000000001400d652c5013ad285d1c2091a281e8a50f46998d991ec941044b0e +2102794 000000000175190d14b451705b876ab43e1c0178aa58b40d9a0fd7a75f7c2d86 +2103089 0000000000124f0adb813216722c1d9a0f426e1a7db2d701800994f8474a5948 +2103399 000000000007ba597b2a6a5786a04b903ea59fa5d351b5330f650ac386e408f7 +2103668 0000000000bcf023e1f9325995fa9c7420e95d82354c9b70ec56760d4a12fd86 +2103935 00000000015705e5a3ab8b2602678411356198cb3a6bc961a601d796cc61e833 +2104217 000000000101c6a29a3e520ee6371acd728a4284c08ca37a943879b1f2550953 +2104503 00000000008426c5a699a586790038e3bd5cf5f02bf1558e5ace5a0c06acfd1b +2104771 00000000001c27faa1701f94afd0edfa4527b2464ca6cd243f597a52b2338b0a +2105042 000000000181438c35b05daa85e68e7c94987234b56619d0fdbbd35f30de88a7 +2105343 0000000000d7fb4c5a7b89a4bdec72c766c9cbbb30657164b7aaef5b12bb48f9 +2105640 0000000000edeca93d6da6f0d2165025328fd590a95fa13fa3ee1e944c42bbc9 +2105949 0000000000be17c29160f95f978bfdd44d3e9b0801fe5c15a370ef339634fd5e +2106261 0000000001cd0274c7b1e750eaeb56523228e3daa50933d3b5b1e4ab24c04c24 +2106595 000000000046b741edf7f7314ef6c62d571622a4c7257c050124d2e34df4b89a +2106871 00000000016ec9bc1eca28390b20e26a30a45588826ea986e1f8c3f550a435bd +2107132 0000000001642153d541f9744667aeacc50731682938bafaa2ee2ef0ca837bbc +2107416 0000000001c27104d4f31a26b484f7fb334b0e84907c6415a1342b11a6e0fdad +2107740 000000000169f8a49f66287ed3c2bf41df12592a5dc525b336622de12c79d0e9 +2108033 0000000001318d782ef724c07192781b0d69906a3ff1a570eebd239d41fa3b0d +2108322 000000000137548212186de70d9be0960655dd856620ab46d8459c32e557f294 +2108618 0000000000e9e4a451fd409324a7790496787ec829aee266cf076c1d16daac39 +2108909 0000000000c49cdc19e6d24a74905799219392bd8fd6232d0934d62b6d95e9d8 +2109225 
00000000007b7daf13baac8d9eec989c559cbb9b3af7dd2a2f9096f4ffe20982 +2109509 00000000013ea4b51437a7d29d3eba586de862563535b59ea60b7dfc6609930c +2109811 00000000009ab0ab6fd7b5fb3e978d2d27e6e0351bb04b1ae9ef1e3fca705415 +2110101 00000000001221d61b8bd5178692a8c35418098b1920fb0470e447de1a2b8a38 +2110416 0000000001baef0c680fb91ffab2de7db11b0a57aab546325493e6bbc32bfc95 +2110816 0000000001be2f3576ea68beec4757c14446f2044d78681492b318aca9c89d8d +2111216 000000000046f9457ce05ad5c9d87a7d6de5f0d526290f2f71dc4feb723d579c +2111616 000000000122596e9a9897ed9130aeff0ec850e078ef65f623e5626414b7f0c9 +2112016 00000000011c14231f9405b2c4dddb7049677f85197af9059f0fb09ed8868d3f +2112416 0000000001b569f523b60e13d853da0a0db6e49859ba8abdca4cabdf03f01a5c +2112816 0000000000ae445d0053cf26a0cfd85202f606b7ef032c6faacf017f0e4f965d +2113216 0000000000dbd1b82b8c5156be270db65bf3ae45130b5a6f1874914f15041d20 +2113616 0000000000b45acbdc3ed6703ce8e31479907e43f1cccaebe9e651b0a3d0058e +2114016 00000000010e8dab09722bb9bc75abe949b72492158b528a2db60bc09c247c3a +2114416 000000000075e1dca1b3775083de165418f3aae99405e3df3e2826ab1e4e609c +2114816 0000000000f700ef27222c61e255cbc44b867b59a157b930b6c6d502c87a872d +2115216 000000000038d59f2b7571905bca2bf9ca67564f5dc90ae9b53859431108e5a5 +2115616 0000000000a2ac4c4c3270c9c57236407fe4d74053a940e701fcd9c4e3b8d1a3 +2116016 00000000019282be257a52518559a9c66cc3963b1c45c2dcfc5d3a84e8d3b9cc +2116416 0000000001826392c47f07bf16cece3ddef30bbe434e5514f7baa05615ae5a82 +2116816 000000000039630d200d3ff912e5e745eb5e994d14dbd69c405c7e2eeba7e9cb +2117216 0000000000b722d83cc8568b94bb9765509c45302a48f7f9251262854e816137 +2117616 0000000000b91c891557df28d4173766562cc455b3b5ab27e83c9a03958bbc14 +2118016 00000000013eb4b6e1cd5b9f19ad032670cad97fd4837b1dd7e876358ff8752a +2118416 0000000000a5e8d0c81b4fb1036d94ac7d16d192bd068258d3aa07fe903b8736 +2118816 00000000003fb9615f739bad0dac026ed1c0f7861330737c4b55d292da4d981f +2119216 0000000000b5e61882e7e4d69c75f335c96a6c1744e8d4602e77caa46f9ee187 +2119616 0000000000d8509fe19c295db85f6001031816dc05aff2674b95925b88c8242f +2120016 0000000000dc0337c69742d294267dd2bd434017827151d6ae7965ec19fd3cef +2120416 00000000010b5fb468a019e2b9115aa0e0ec8cb92017195a4ebd4e9b5a47c6be +2120816 0000000001206742e94d9b3cb8187b4c26be13290724ef97848c62f7d01e90bb +2121216 0000000000b2531dd904338ddf602c87ac70ec14e7aca566d297dff7278648ab +2121616 0000000000b5fca10a6ff18c158d38b83369405362f97f5de4c9bf2cfd12b23c +2122016 000000000121e68a6ab027b23fb9a5e73eb38fa6af0fef88931af48dafc71821 +2122416 0000000000ee1cb21aa6d9b957578ef6f3e45b6730ce7c6e22edfa729d3301f9 +2122816 0000000000fcc1ef9a8d0e4b71f55947fd094ac9254ee0f21e2531eec099a538 +2123196 0000000000c38a681500f237539b08b8d3f75d9ab0233e2b5252b76ddc4727d9 +2123582 0000000000c4014be9b89ef009959a45e4fb48c074881c7afe1780b760853127 +2123932 0000000000e4c93f99189deadd479ecabd3660de1041ebb4a0c5ef5d7cbe5e51 +2124280 000000000127f2b6c0c0ab4048b16116e559dc9b9a934fdbd7810e1ae49b5349 +2124602 0000000001bc3445533dfc7baf59b6294ea1d4585ee928ec18c79b6b49f3dabf +2124906 00000000001e2edad0443cb8d4df1da641c3c58f2f83893e361fa37fd121c29d +2125219 0000000001280e8b6a0642a896b7b5337aac71c543cc36b26d2d296ead4af381 +2125509 00000000001d565ed9c555b1b276dccaaa87a3c0bbed390a340c799776e37be0 +2125805 00000000017827e42bf3b99f40c6ee11c8d4e56dabb802ad56e74c766a31ae2c +2126109 00000000014e149e7bbed108446e842a5c25e400423074ca891cd88c16e18bb1 +2126422 00000000005bf996c990b6124d3a93f50dd6a8403104be774a659f0c4a1ee54c +2126703 00000000010b6fb36760137accc7926c3d8c314307816f15f84c63eefdded7a8 +2127017 
00000000012e0ba6b5a5f0a4ff5310b931c063098f3e96fc997c7b6fb44a24ff +2127320 000000000116fa60015d9f21754b07269703129fb4af00a7b33f7b0fb5c128bc +2127612 0000000000df367879d160aab3f1c3834462110de823b7c2e1407014b49f5544 +2127899 00000000004731b6685e37ccead8caf23b7c1b229aab4407376a07766ea1871b +2128194 00000000013eeadbf973df717320aa63ec2b3e81f0b19e521c37db25ce2ad630 +2128515 00000000002b17686a2aa58e52433e4a11fddd1172020e1464e91ba54d6bef29 +2128803 00000000017166558e5f022e46f2a476c67c69f9963f48951f421ab37426e3a4 +2129111 000000000136b194b3e7bcacf1a0222a8c7f6d3f739e42fb7db2b9ebcf1b6332 +2129418 0000000000ade562bdb165aa21fbefcc0d2e655e213e5ddf0b2bc5459c0b53c7 +2129741 0000000000408733f90084aad38ffa5a356d9f220e40ad077178d492e96ee696 +2130039 00000000015295051bce1c94530d5c8341f51b7aeabed721c26024088acc033e +2130324 000000000047140460766777a3cc6ce71bccf3d2d1aeff7b74936f21cc9f666f +2130628 000000000010fafc22180689562f6447240af96dc3645a666d88655a15509758 +2130915 0000000000ff00e5f8d3608e0549e680f32cb9eca3fe15eab2f1b43f8f5f1f38 +2131206 00000000007e0b7952afbd83aa3f8bbf5277eb025a8d7c130f750b9a75cdef40 +2131495 000000000060944b74e2badfc81d0043c4d97577450193a72c9f640bb8925b57 +2131813 0000000000eb90d10f092764603621bdc345875631ce08106e7bc5cdbea23902 +2132122 0000000000fe437b14ce7ad15b01d31f85076d84331ac0fefad44dbe81246e48 +2132410 0000000001768b04e8620bfd1de919e9ae09b04c0a962b158b106a33986b9aa8 +2132711 00000000007aadf626595d2e46ecff42d199b02849e7815fb4ab499e902b7923 +2133000 00000000012bd3092c628405bd45bd3c3ddfd9d7f282c5099047ec456a83d4dd +2133300 0000000001affcdb85411d7d2edaae4ece70497edd418b6ac5a6b61471401d69 +2133604 0000000000cbe948c19907c592e4e2704ddb155c1da1cd3a2e6db5ebc8451494 +2133905 000000000157943224c2fc9672f4456dd5babf2fd7404077d85f9136d54fe067 +2134228 0000000000663b7abc789a86bbe3cb0a8fbe5be67c74302e9d6abeda775abd98 +2134528 0000000000449198effd2777d6a2157e94916e317b13eedda1b833c633cbdfb0 +2134835 00000000006ba2705c7eaafcc186ccad35b7f934da0c765e757f76e151137b27 +2135171 00000000010460ae8510ece826a72703f77ff0e7a463e33378c22e136f8152ea +2135462 0000000001195d894fd61b555ace3d99a6c1c124d985880279d0435263941135 +2135769 000000000054b8e03388a56b73a9652f3ff23e87ade79993c76cf6d65397e353 +2136070 0000000000d350786b28b662a544fd929f02dd778b46bf73c0944bc6b0b39e2a +2136396 00000000012230ee900503937205d1b6a6899128801a75b4b1d584f3c13e2fd4 +2136700 00000000002ae376a9bf93e1909594728aebda019466440037e75d3083e0b7e7 +2137028 00000000006023df4efc2a79131181cd46109b4bd788256ad10662edabbad5d1 +2137357 000000000057627e27490f20ff6290004762a698d7d69f9d818c2df2777d9282 +2137686 0000000000f52577e5e8392873b1206ccce3d4ea25360d297d3c3476dbd982de +2138018 000000000006e84370babab79c13faa64113afb4386a92375983d3a7987619ca +2138392 00000000010a100e0d11eabd1692eac1cb93989d2cd03b355e5b0240f77cf978 +2138792 00000000001c6417d7df1be185d6b0ec0657703eebb22e68a418a565da99dbad +2139192 00000000009943cee98c43f5d391769eff1a1f89f4b2aa8f61f1ca0b7a1a035e +2139592 0000000000598a978f9fb352a6fa920de69260096e159d68dc8d40aff3c17e92 +2139960 0000000000cc1ccb9f4b4f48a5bb8598e7205db3042f98b37eb02e3b37e6fc6b +2140206 00000000008d6f467aa6c002fe0a64c9a4fbf92421de221690f0cb653b34f646 +2140557 000000000116303502e9e57339e1a9366ad41dc8b2856ee93d0ba9325acbacea +2140935 00000000012a549d767f9baadb9d5fbc32b5731e5f17984fae0f710aa2c07e4d +2141335 0000000000aeb392feef141bdc08e20dd7e3b240e48d5a71444747f1380c1590 +2141659 000000000069f06cde1416523d991d82103ec685d6e83a523511c481488ee1a3 +2142040 000000000111b9d1034a10f455c7b8fa23c513b137978d9277c3cb228aa1b87c +2142388 
00000000012dff7fc274b000fb33ce3bebeb804fbafe2d3ac192699a07f89866 +2142701 0000000000a3a77419c416ddca708cadea1f8f824b1471b9ae15d82bef3221e2 +2143001 0000000000b75101aa7213929ebb6c1cdcea847b7b6fbc5cf80f08819e921839 +2143342 000000000177e02b211712c21ee84c4318643568101ec549770a93bc39188e4c +2143638 00000000001e80d2127c27a6e9943f6289dfb63ff38c870457026af6bb60bf97 +2143939 0000000000d42517546ae61193b197dda0eed95779d905608b6d8b9c5312d3ff +2144245 000000000142361c95ae61438f7184aa13797381d4b55b40e53ea1b733fc3c61 +2144564 00000000010a68a12792eea5a16ef54d758abe202b3e5675116f799416f4aa94 +2144872 0000000000d16090f1782e5068e54f8090de1c618067461b875a9ed634599678 +2145166 0000000001eff76b65bee186f9ee3a8334a8aaddc26592f1adc13dcb54fc4dd5 +2145479 000000000130119c71832a87a08b20ab3ebe14c27d45fa4771e03741804b0ca3 +2145753 000000000030dc3f97729f24a349776dd0fb3d76791daa2546008018bef34d4a +2146049 0000000000bc378dd8cb00ffc2ed5903334f139435d4836e00b433662f4f8745 +2146356 0000000001cf420569c9dc8e1d53043245fe041fc03a42ebb7029cd5d96ccc1f +2146681 000000000184c12fe3bfe8f2a9e07f32ef069d1ccd0e1e37e593e11defcc9f81 +2146962 00000000011ef518062c4bf693fbbc5a9a8cf14070dadf0e034455c950d2cbc4 +2147278 0000000001433728c12320b61be87dbf5064ce53c5d0c2ec0d88849606ac791d +2147579 0000000000c6baafb5f6d4505f9df1531695a4ef162627fb74dfba790f889bf1 +2147887 0000000001551ae09945147c25bae678d0ba160e3fdd5526dab400c6e7f15d0a +2148184 000000000016e7048f77880ee061ce62210461d0f16d9bacb977a313bb7a6a79 +2148497 00000000018e77540b791666e462d03fe2092064730e13e5dc4412cfaf8054a0 +2148798 000000000015ca4de3c9f8eee7a5db5e5743039ddcc794a9ab63898ccdac4eac +2149107 0000000000b95386a0dcf5ea3378ea4a71a8d47504dec77525fc30abc372329e +2149402 000000000129f21442d711334026047792418d7552ac15446f804e65e503520c +2149682 00000000011366ca792e91c4c568e3d8739d97c1a385ef6bfed90a477a9622d6 +2149994 00000000019ebc82fa134540b12d44baf296a7de847012aff9f6d7984dd59f8e +2150295 0000000000f338ec2ee075c53165dd3c2075340053f29117ce560858cbcb89ea +2150628 000000000104a1912842bac89d50faeeb95d6381085365a98a74a6ffc5df5916 +2151028 00000000018b4d5632fe0fecd4b39df78dfd5c0baa9718b8540d8e20a7ac5a44 +2151428 00000000006dad2c3e9e63da48eb994eeea239c2e2ead76129d9092ae0da8611 +2151828 0000000000e482a079287b9a742fccbd8fd15a0cdde94207214a2923e1717030 +2152228 00000000018e17d2ad8f040334355580cf8d94217b50c2d3a0a32223fe2455eb +2152628 00000000010d95f51d45c376c436fc478d15b5f19043ae9b877554edd6e955ae +2153028 0000000001ed3f035ff96ff981bd1baf7105e8ceac2ccbb08d484ce09fea84f2 +2153428 000000000056e97ed55b0ecad057d20e5d1945655dbfa945352efc68080fb106 +2153828 00000000004a6c97362e2022c443ff1676e7f385834eed887e6dea4a967f5f9c +2154228 00000000004e30690838777e4bdd9594c07c9859d09c831e182ac41c803ba4dd +2154628 000000000158f8dd48314777079df1f7f4b40f03c80bc7ff79325e3ec9795c7d +2155028 00000000006a566ab3d31064dbbacaa7c4f9d0cde9a2151379ad8eb82b5c22b7 +2155428 00000000019d5b3b490aad9d696f73ce6e2c9dcc5aaa0f59d895037f0c42464c +2155828 00000000013fda74b17fe46eb349312cc8641e4e4cc8e82c7461a0c5dde9942f +2156228 00000000002a5dcecbc9fc649a00bd369b3614a2966e31dd8f6f0486d5423f95 +2156628 000000000063e00e765733cbf1fa8f91b3918704f8f7f5f2164e7c4db93db0ab +2157028 00000000011ad7748b6ad9f296bebc2fd5d5fd2471c3957538a05e12442e5220 +2157428 000000000229fb466e7d68465f58a0a107291164f98c4a8aa473b61b27bc58bb +2157828 000000000029febeb9e9ff7308a46dc08b7cc6de7b8525f6e71b40611b7eb8a7 +2158228 000000000132364ef3ce63c688d7c89bd2d81f9db403f688013783d231ec77db +2158628 0000000000747ce4c907c89887de3084bd7987bf82da755b99c27ea5003591d8 +2159028 
00000000009f9607df64f4a64f09405c34ed470683711ddad307fca1fcbfe242 +2159428 0000000000024d54b83f05fd6a7a4a61abab1b1491560e4175b6590b99fb9792 +2159792 000000000022521614534df6b3640b8ee5e83481223f17dc98b16eb209a51aa1 +2160160 000000000160c7dc4b42f0b2df00563adc885f9a540912f25251a8de8cdda6a8 diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index 979f082390c..be02a5648a7 100644 --- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -5827,3 +5827,249 @@ 2330400 008669568ece470d9864735b490caf727b68b74e757f162dd0ad3718123dfea8 2330800 007e9deec86c3d54574fed9a49e758dedfbb764cef3f74c19df64a0045596020 2331200 004580f2f264ff7157f510c680b4efe51bf1ca78e3fd551609e0a77bd60de4f2 +2331600 0009b548a3b16906c4d46672ef6beb0f3226307eecdadfba2582a6268954040a +2332000 00aa72040fa3145ba80541e37f4f6e9f70db71e0afe1e7a2b295f587d2bc0af8 +2332400 00139b8e079e992cd8ba70bf3d7244cd2be3f256702983dae000eaa935bd14e9 +2332800 0099e2baf3e1643694d85b94c0b5c33bc674af14599dd7bd000916977d12b51d +2333200 008abbcb1bac2807ba498201768b3822eac4e0e78252a8b4393e0b5f9054379c +2333600 002e725d23d6968f9c92781a65e0d459351e672272b626573ee0622f9bdef682 +2334000 0035c24576f6f50e63bd8de5a41a7d89f0dd7f30038131b44c723ce07c0338d1 +2334400 000f0523d5a9d678a910043b9381c7b2af2168d149c95488ba0ef373902d09cd +2334800 009e4d9a4c04b2a9ab7bc7fb5bab1285dc74814a36ddc244d294627aacee56d0 +2335200 000fa3566a2aa663b5c09126622a64ed693457813e7627a7659c58637c5c4bc3 +2335600 0161d8e382f90e119eafffc24f2144a2657431afd0f6995a3e9828e4d69330b4 +2336000 0082fec597fe97fcd30c735646f4603165d65223ebbf0a132da0b259b07a323b +2336400 0040abbdcadec76905536f8ad001a9ba6a10537d42b0ce98115f0f7e5adfb907 +2336800 003fe35636f76edc5ea3e75a36f92d03a149a85b44f428a15c8ff152f3b208e8 +2337200 00c38f200be5e9ddbdb18b0c7844b12c75f95028d472bc359b97655af822f3f5 +2337600 00de39f3f6783ecc0845028b5a08a73595895533d464d6cf0e603711a9cee770 +2338000 0005d876aaf60f9c28ef9a54ebd06bb2266d627715d7c3171198fa81809ff7c4 +2338400 00406b335801d4fa99c3c4250c13a61f69279453d82236d9787b51e0dc7530d4 +2338800 006fd53c497975438dc904aa93c365f2e0b5d9066c43acc1473622fb880ad7ee +2339200 00a31a27bdc1f593b3c625f817127b82b1075e9532dad4945e3215582cd93113 +2339600 00041c8a9bcddab0514870703fe8278255c2e92d32cc42b0cf60849efbb3d738 +2340000 002c18eedab7798ae6286a36d5e88fe076c5dd1b82309895536498d6a55c2300 +2340400 0065e036f01972b5ad0451b065a7ab2104f0e80680598f8f065dc200fca8fb02 +2340800 0088e3ec20439443bdfb329d2e79875d626ab401497467b29221b8603346d8b8 +2341200 0005ba8b7687bafe4bd5d6c4120caf2e50550ba5ac9871dc8616cee6322a6a2b +2341600 0033dccdfc3c42e3f2a2b9102d9f34c2635991a548f67702a77d2e7dbfdd2212 +2342000 005e714c96771eaec8d83cd0c4f7b59e09f46235e00e8f073c3bb8667479df87 +2342400 00655671a299d7f388239324db297b9418b01cc9212545eba77f8ad30d2f2c5e +2342800 00ec042621ba4a3ed3401fe0adb5a77318f1805c4f11fbe6b81919f920e0ddb6 +2343200 002d58735bad815988159e96baf4436de6639bbf168025a73ad36fc63de12057 +2343600 004c983227e2dcacbaa5977a1b460823bbf8bf9ef840c49ec9909fa86b35c611 +2344000 00bfe21a73d76c6ff21d4a8510cab28095961e9145ff792766c947bd84a92124 +2344400 003e68d18a9d946f310e59c0d305e6dedc65f79fab0b80e658395a7f4f994d38 +2344800 000223ee61a4906b733deb9e977bee05a4466fe405df0c89cc99e30397cd8260 +2345200 00dfcfaf90014a93dd46ffaeee52962e009fb21919c69603b27a2f62ed9b9012 +2345600 010100706849c4be8acd87b9ea422ff6cf94b1d74bb0aecfe4b9e60d02133a74 +2346000 002828b1a8d3e52bfa0e41bccffa2165e6b9ea9eb889a1931330a87aee6f6bb6 +2346400 
005a52a2a7cde557512f276c0342c04939147bd1a9524c7e97aaed7f5255d55c +2346800 0095cbac48a6546ad31398e5372411e09ff9da184a82ec244f1a0ffeaa4d0394 +2347200 0078cd2181201db4a98a0dead70d1bd2f30d419425a5584e46aa791b6d4a7bfe +2347600 00329c81a460483493209cbf6ec41bb09deb1c1b7b6b65f8ec26aafda24a87c9 +2348000 0006d3cbfc8f0c441fd4edc9f53b0df7bc56c4d0f68e2c0e20aeec4a62e5ba17 +2348400 0082d919c612628ffc19129767c9f2040dfb9affcfac70b97daf031df68c3b42 +2348800 0051896f03230319b16947b723ceac4a3ee83d8ee51053f2008e5fb4fc3697d5 +2349200 0081d630671395b578e78f9c5ab2e86d9882aff8dafe3d90d74a909c80e47788 +2349600 00572ca27cd2c286e3a2a60f446cb7245fa6abaddb54f5baabd04843a7f700ef +2350000 003069053a60727191c6b37d3895682242b964889c5634ae4d348dc00c4619dc +2350400 01248ca3b868fc3108a909277037d9a567a275d6f30103faf375b87d54e5b29f +2350800 002f764225f023895da74fb99c34fe639c98df5c20d0cdebcc1914d82712c061 +2351200 004f24fd7a90d1b35c2b2d9ea299d3487566385bb18835e0849cc99e46e67189 +2351600 003c60f452959cca109c4fee5a848ad174f7f25a61d5e799b729da38721f5297 +2352000 00983f53efe520e91c8faefa5c92e8d601a9165ecf127d7cfe31ebe7c8fb86f7 +2352400 004961d0674de9009f77fe929382851b79f842acbacefe36654d48d26b5f93db +2352800 0021ba495ad27eab30ce8be12a9b75bb12ba70fc6d1435e051954066c46a069f +2353200 000e774789bf9600e46f489c1aaebfa9fa2e5c7ea50915c17c8cbd225a788f73 +2353600 00921d0864b9668fb2d89e32861527600bbfbd8c89cd7f1e8b4d848196494e4b +2354000 002a6729589cbe35528fe63b42168fa3ad42248b9168a00c6e76dac5693d3fd7 +2354400 0040c9fe0d48436c5c242d418e4d3e1785e2a06aeddff362e17e6bd83a1f4c02 +2354800 006f70ea8090d18b8b91b06d442f88170de92d51005edf94f595e0ddff4f693c +2355200 006c31fc1d68146f1190286a73e5230b922bb877704ef038631ebaa8b5a660ee +2355600 000dbf54a92779a7dc6abaf7af8572e4e8b8b2a200761e96acd003e4a9bfa5ea +2356000 000e1fe86ab564557cfde4a2aa4321b293b7a2b8ee74c595a6071a15206458e0 +2356400 004f8c72fa892a68ccba76b2a2a58d3950a650d7fe5e08132d523cbf7dc49d06 +2356800 000c345abfce35324d97821d9ddf2dfebee89c68641ef85fb2030f31b6b2934a +2357200 00586f1618612ae82a75a4dc8c4af2db3165c7ee7ada1cc51effdc81b7f2d588 +2357600 0002efcdbd777660634a77bd4b910344428c649bb169d1f2350341ffa189604f +2358000 016ce033b882949997208502e67986d4f72ee331b38186d30edbac7754f39248 +2358400 0036008ddbde2fa0047b61d34dd0dd4523191aa50ce5c3ee84f8bf3ebb08858e +2358800 00125c02d232037b34ccd5499e0b22ab52943561930948ad377693afed91982f +2359200 0044a5e6f02310b18b2abc57bbd09ec7934b3aa80df4fa8f81464266550cbc2b +2359600 00a484fa68a3ccc5dc55dda3d0c6a90ac0ab7b77fb7049fcaff519e3991a019e +2360000 0031d12534f35e54170ae9b8fedcd611c48f12a1be093d47bf1226e219beb418 +2360400 005963b47b255547135ca26224cc6a9b5257286f83bee1ff8512292b0f7f74bc +2360800 00822cc61eac54709c5874842ea3a6f2129d34c20022264147c1987247b4a96d +2361200 00359f742d9a06a76fee644305542517f6240b8fa86af388a574d2e1d5f12c59 +2361600 0021df1ce0a6971f3c6988933a3acc3ecf8fd94b251bc4a13d0a3ea7ffa8b20f +2362000 006d2ae23c9ae386d85cf17d8a14a54ff9e5bb49a93066098f732c644760fa5a +2362400 004856c1492a51ff1c082e9ae62e102b7f3590866f38bf243b170bdf57a1944a +2362800 001a6a6eb376abfb9dcd974b1a7e8065e353479e972334fe3fba1911f6136505 +2363200 009cf8e442dbb74299b4bc4bfd83fa43ac28c8ffb7630f55efb660bc9e5812a7 +2363600 001efe1adbde8a31e70c911be35561002bb30b48dc311920d474b1f1ac4012bf +2364000 004c11faf0bb27bb84cba0912b16fba211ec71bb4b69825f9a9a343cdebb2263 +2364400 004899370613a6893207f45da046907041ce17c06e6ac6b4ea79aa02cde132de +2364800 0028a024cc2e565b0206243b17d54655529831d70bfe6cc8c640cb64af634f78 +2365200 0094b6f96828a0295e198e32341424e2854bf6a062cc2c2e0855fb35995ff6c0 +2365600 
00a52a846cbeaa6534605bb8396e3139594f5160e705f582acd3bc607d63b43f +2366000 0077758232f91645e9abaa27648b2ec5e7af9d61eaace15ff5751cdcd50a03a2 +2366400 002229c39b034e7c2b1eaf1b32b2c9f50e70559a075a7ef6ad642a61898eae0b +2366800 00315ac86628c5372f133436ea85d0c17bd5f98ed8a7b50039397eaff0334b30 +2367200 0058b2290e17df289cca7f4548bb9bb77e35fe5a0c6e5e57c8fa542c23e5f568 +2367600 0053add58a099464f321b8708c39b186b8af3a75b749de0a0caa16f9fb6bc81b +2368000 00225a4916fbc660f70e45558d4a99a602fe9cce26a2d8c1bb3356717805b174 +2368400 0064f94f5ff49088aee047af2b56619a86832bfc8266f5e136c957a7a6b7f036 +2368800 005d7aced5d77767ec25ad54ff28efb5bbdbdbb3e4ec159833212cb4a130c7fe +2369200 0005b8eb5b727e8971d467db170f9338e751bc9c7d02aefa61bac5599f83be50 +2369600 0043074760a32a7eae055124859f7f447fdbb77535b31d946d4e0ef962011065 +2370000 0000b3672c7628008574f5c0c83dd37b49eb45d353d2e578d9f47d188dcfa2c2 +2370400 001dfa30609c198411049da4a13709527289f483752e1afba516065cb939b8d8 +2370800 00124174ac647f00f6656209e0e7edb87bb76f8ba32ed3f929a0c7480bacc2fa +2371200 001dd4010f636def1bab43d7b5d621ca984712c7cd579995b0d9138c5f8bbb87 +2371600 00101a398fe99963ce0ae45f67091b8ee71673ef7ce6a16d4dd010712aca7f16 +2372000 002a414b6b69758f8408326c7b79f0607d27a95ffe2c46177c9dfc6179ee67df +2372400 00135546d02b716693f9b1c7764d30c7db7b876a4095cfd7b391f4a34f5bcaba +2372800 0007010345aa4f5cb2d4ac761d1c5b9a82ab7749aff5cbb8e881a2701fc88b11 +2373200 00180bcbc032ea60438ed1e1251ff5cbd8f728347758ed177ab56d4a8ccc7cf5 +2373600 004f6e9f158296590d25f38c46ab5fb7af52c681c2cf122c9caa1cdb651b5187 +2374000 001ad81a27ce25859c4cbe28da98b6b1f298aa460e842ebb868b6d5721cbde06 +2374400 00e5c9d7cd641388f23714dd3fc1eceb929968b908c4411fb78c3bd9ee036d61 +2374800 000bc5e9a8ee1b0ff85efcb1118386c88184c83001ac99bd0b30841bb3b0187e +2375200 002961f9c0ac851246c2a796b7569410c0b0ac0eac8c0873b7c65c511f0523ea +2375600 00a0b4d01e3bfaad57be6469ee23cd58f6fb19ed34d87358a1c8479db5ea59c4 +2376000 00071275ff1a42fac46138b1a1b5997cec976bde326d3dbe016d457e5294906b +2376400 002e111e59b3bebaf40f9038ba97327b92ace389eea95f8a5d439f97d8e43a2d +2376800 001cd77884be5224bb6fb047be86fedc29b771e31e51801b30a10a10716e10ab +2377200 00129deaddbe60261a544be644022841468741e94a0a2ae298ef1b8dde19af8f +2377600 001fe8a1f4cb96cfee6a7485c7eee913170b82b5cc6a8b44864c6fed186e9339 +2378000 001a299d0587852d9718ee4c696827d96b4168be9865805222cb515f14fbdbae +2378400 000f60d57bc9c19d171f04662ee5e06a1dca91f091c8c1f10e4acf827cb9548c +2378800 00192766442c4ecade78c79653645c5fb06cc99e26a900a5b1139dfbf470d6d0 +2379200 004391bd427bde706e2754f5a5f84024536b8d27d763c37b5ecb9261bef359b9 +2379600 0000a22e46cc27732a84126a2de66dfbe0bd31a8fcfbd314b773cdbb0624ab99 +2380000 0001cc2c2db186634b6820310aa9f7c5a1779e8027b446c966de2d4df1db119c +2380400 00138d1e655c247f4835b23bd67637a54f325074f48a6d5d8cfd198af1dd389e +2380800 0018c3e56d80300f2c933e7d605d7328706479fbbd426985669b67986aeaf241 +2381200 001eb8a8a29d3a665d7c9dd63e055950ba5f62e8cf9fee85dcaae129f72438c3 +2381600 00169effb224e50d189a3c80e1c8e20ae9ce89dec5d231f3cb4d9ad2ef188cad +2382000 003ef4a716a195588c7946a6a5fcfdac029f4674740d5aa71d7ad5c33530ca24 +2382400 0005cc2b3dead6906263c3f61f16e03b9e9418046a3bd1f44b1057e136a45513 +2382800 00462a258adde935bb9ddb900b595d340938c0a05155c6690bd0a2959b1115d1 +2383200 00187156e8329cc8f0374c7278c53a05dcc6b9fca8320c1a11eca1ea77fca05b +2383600 000b74dac7fe5f75926959a06d00a02f8bb8025766a1f484baa055dfa18d66ac +2384000 000df197d3dc51cae497b2853c6ce5ac8221f0e8fe20d6287a7d8a4cdfa6a9d9 +2384400 000af5b611047bfd10b5fdca61ff6d70a54fc0b94088f05e070877e400d7a551 +2384800 
000e34fc2f2f29a8d32b04e6c8527608ca51ed3ab764d5e413ac14d86e0cc0b1 +2385200 001dd51a0f99105c819b165aa744867a05f706daf75b43fed397a61034ca150d +2385600 002373147ea295799c286bbcea88dcac18493dd7bc88c6e8afc1d649d07be0ec +2386000 000760d50b320d2143a522402f640f06774564e39b88abfe2638c4a3c5ee85c0 +2386400 000687e79efad695c376389d7b067a74cbcf7ff01d1503f40d13db6fbcc0f044 +2386800 001dba9917f4f3f03bd2113bdfb7606b374f583b26e1274234dfb014645542e1 +2387200 000f0e7482a8f65f5a7e8cc3bf9d3bc0b352c10f309a8e3048870e4a0d3f32a2 +2387600 001a75b87be98b8fc41bec67db3d5ca41c1cc73e86ad292d5498dafc8409f325 +2388000 000681e3c3dd26646a307943bb5e46abff6422681bfeb4e059ccce1d56a68b69 +2388400 001954d674bb1468c290ce6e41c9e30499c887b609a9feb5a13907744650ce2a +2388800 0026a02c112c93a3239d05f8c02f268bb9965ff149993f33ca7c821893fdd590 +2389200 00125d3b01118461734ea74ae3a0f0fe471cc2a86687cb767717f99ec702fde9 +2389600 0005876465b0416be5f26b2c3d0f89eb2575fbfb4a2739d16983f151f00e2bfb +2390000 00165f0dd4864f342c30158a6d7ecaad6eae3617388783346926fb477f69f3fe +2390400 000a864fe4954ac591be34451389367d06d70bd1cce51f35d21b4b199b55087c +2390800 0012a95faa1c1ecbc9328337827c3dd1b317b240bea483bd59bdd2f9fedf0b03 +2391200 0015984bead0ee986c0e55621f68c8c0b4b5cc0482ee4469b3179e5dfe85f5ca +2391600 006903d70ac042330094b2571789c62af43230467c8cf7982d0d76ffe6121d3e +2392000 00161e72c469aa7f8fad67138af97f4dee0d1efbcb7cdaa76979e065c908d8c3 +2392400 00016dfe99fbbc22672d52cf62fadcbdb451c1c8280dd829ff6f930833d8a905 +2392800 000690e3c90bfb4eb82b2dcdd0353d274a60a9cad309b0fec3e7458e7b65506b +2393200 000cc40350d6872b86abe8ce159ca0d8a5f275713b4d1c0d6d6db5f3f35d1d2f +2393600 000aae4b2b7d546f42fb3b73963cfd957324245ba470eebb2debf96a1e212450 +2394000 0072ce0c882d219ee14105040dac351f6b73b4735aac0ee699b0cbac8800307d +2394400 001913cae8692b3c6b645d86d0be6f43882bc11c0568017abfeee2febbc1e58c +2394800 001cffe7c7602e64a9cf6da7fa7b21ab475975b6aac94a2b4c9d4f6ac9771582 +2395200 00179a14baa1629fb4e937cdf6280375ae16312679ca27e23b236689423c0cac +2395600 0020b3e0e96d419ea0bbe1b45c1ab21440377a973d6b063368f47bf5dc4095a7 +2396000 0048d7587504d873ff5f25a419c258984acf384a3c3b7e3c29b39905f65fc610 +2396400 000296911fcca5522ecb2a00d5abb7718afc20c984c67446a1ac1255b3adbc49 +2396800 0033e251f1671667f60576fdc4736a3540a6cd6863646833b64b90294fcfa793 +2397200 0025444202d8824a1fce2f56594b5b215b41b43dab116526aa54f6fac328ec32 +2397600 0018efea056bd7e711ff110536e1fd06d72c3bcb59808ec78ecd6a8bc6f809e9 +2398000 001a8d6641c623e2217d8c8cd0ac1a4f431a096e4d4c234a23a75248e2934a98 +2398400 004e7e1176eb92c65814b208401b258149ebdbd4fc8e70356ce133ee20846412 +2398800 00104ca6428e88d1789492a302503471b8f81ec1d74dd4484621fcd836971923 +2399200 001a192b3b9c798ec182f1385e56245d8afb55698fe6e8f9ac3cbbe7c6a18a28 +2399600 0034e7c97a1a7755e9e83a69df53a7722ca3eeb7c827ca0212cff32a6ab7d224 +2400000 0014f90b82658ecce4b29d8c4e4b755324f5b75fb85b96421074c3bae652ce1c +2400400 008495070e1d0de594ecb8c74c0d79bc07c558661fe20f9492c60adff42983e7 +2400800 0009959bf6deb71a7198df94c2e607d23b887934dc0e3bd031c19b56f7df427b +2401200 0020158fed99b7bbe09e0470edc0632d05e7559cb31767f883ae8ee9dd3c3fa8 +2401600 00177c5621ac7a61674f66aa8e9015745c4b50f4c90b6b410ee9939dc7c2f136 +2402000 00018340c3a99b95f5de82c248ac47735cad3a0fbdc6006caa094db0673b38f0 +2402400 0018c3aa759949a1280b4f865c639609e82a9a1e27332264ca6f87a9c531634a +2402800 0009715feac14163c78a68f477608f0a6424efd02ffe936d7e9731241ee67078 +2403200 00058881db360bdf0245b5be0cd6bd85ef1650388a90eaa4a2f8644221b3459e +2403600 0067d8e71caef5f552904c15d8560500b810c2cce0928b630e8ac6382055ef89 +2404000 
000a309d9d8ec47ed3be870aea4a03dd75822e88790229bc9bee09de86e0ebd0 +2404400 001d29fbe998d3039b6967f944f1b50e10ce70421d60d0ed87f39cf7d5a8e350 +2404800 007574d68dac9c4f91607ae1077b05c0394e9da87cad82d8efc920f752d947c0 +2405200 00184c4242fbcd575979107f6a0f9040d1873fbc48d606f6759da52c3ced87d3 +2405600 001b392f4bef65085a04ca389975b3ed42fae965d933152f1248ca1ca589e6c3 +2406000 001e4aba075ba633b8ad7f79704ae0ec486e4861482e86ce1bcd791f700ac6e9 +2406400 0018867cc75db62da321c0011d28ee3892a2b7fb55f4f88740b3a5a83cbc2e10 +2406800 00215d1a7e594637abff20c50db5e72914e2008ffe695e99b27a634c582f7457 +2407200 001f9f6d16eb0826cae423c7f5b1c9be7cab97f1f43156f9c60600f21c63106a +2407600 001245fc5f829bec3db26630ad7c9a51a1fd99669035f66ab41e7790fe7b1b9d +2408000 001b893907719478f6ae2fa9a673a6632942aeffa95b98ed20c477ada5396cb7 +2408400 00113b4a16d8e076f3dfe6250964cacc6fcdec235a685591a7264abdf92a8c36 +2408800 002fbc1b1325182ef8419bc3118e6c79d1dbed62ea665b3c7900656aba4e0a35 +2409200 001f0a8c467e598e8032c535b974c273eea839e8078c2b154724ddd132fd77af +2409600 0013cb11e27ef00c30b63509d5421f4d9e3fcae15bb58ff59cb6deddf50312dc +2410000 003562055f67b993af8e15649617dfa5ac9596ebe60f3aef82cefe757b972401 +2410400 001aa605b3120d9f312e118ff7fd4262b62e112fec65c5c9c753535223a98ff3 +2410800 0099af7fdca8353addc8baadcbde264b15660e0db61f52aaa0e7793160ead9da +2411200 000635de7abcb55bb2b93c1ab04ccb802b17d5c88f6a6efdd3480e389de9b5b2 +2411600 00113848f9b8797931dbf481c481cfbb24360575bf2a49805823cef865634916 +2412000 000f6742293ff5ef97bb9be28647500afbae23fa86896326a7393b2d6d45b310 +2412400 0013e29c30e96db9c2309e0dd1bcae2bd3fe5e8bbea455c1bcb0a7189bd3e431 +2412800 001e404f3ef35c06248169aa4f0c4a0acfea14545f064fbb8c148f6cd0884c0e +2413200 000c83c13e110c71eb729776deae6fc3bf8641dbd32cd3565ea0e386258d3833 +2413600 0000246eb0b7231fa7733128ebda3035802926787ffa37f8875ecce4de4760fb +2414000 000e73156de990023f9abedea44aa6b3fe773da8dd2a748540aaaac189f7b4c6 +2414400 000af497ba3897498f01e19375373ea445567d79603eb1c31e8087672817fb23 +2414800 001d3546ec3934694333b807ddc094ce788f123007e118098f97516758855a64 +2415200 0024b3d2b66a83dc985f478043ea92f28d4fb2e282d058b709191603c50fb0a2 +2415600 0011f73490bd8e9bc0dec7b71ae9763b51bde03808bd7c12f672ca280ccefca0 +2416000 0009a7c7a04dd18e1e9f9b3547b90051d2ff2ca49deb8501e61522d59926f4d5 +2416400 000ff62d148ece31ac95244231e0b2afc6594064b42821a6b7a6dd7f44012a67 +2416800 0001f2f6e77ddfd2b26f6effd5e69b395dbfb444375336c2fa33b8171470cd92 +2417200 000a1eb94898508f9240bb084a15d17dd339a8dc645a0f02140a760a96e348a1 +2417600 0009122adddb8203272b43f810e0b91ddee7b50023a4ad4ef3bec6cd6e8b3b82 +2418000 00b572de4fc8f36553fedd6f8052abf9ef3e23379672585dba56e51ab0d28943 +2418400 000e1452a59a48d05e696ddc6c2c855b970ad8b75d6ae27a10b89350426dc2bf +2418800 001327fa234866e2a63c5303ff72a9c2ae1a7cb62681d0418c28f308c61bd304 +2419200 0016c2fda05b563490258c503c6e77b7bb76a51d637968f1c8f768709a55f6ec +2419600 000453e2a08768d6eb42fc12e194206ef9b319e5b05aa7901ea0c26241860009 +2420000 00036ebc9345e3404b47395118cee2f30a3b1e526e2ac1309675f3a04409fd16 +2420400 00095db89b9b48847b864aa2235a864b98f5745f0c386ebcd4dc62213ff4a62d +2420800 002250914adffa2990ab8065b7a83b3e209792b40173d68ac77f1db74b87ab61 +2421200 0018a36332d5413807371cc524205aa52f3abef4497215a4a8cb554f61418ee6 +2421600 000fb66dfdde35a7b270f6059fe2d6e37839ad52518721bf26fc2c9751cd463b +2422000 000b3f76cfd75aecfa369de06ffc5cc3887dacb3b52d08401dc716864de010bb +2422400 0015215a0dbb7ff3495a358120d392955a78d228213db9a6c712517b0580d332 +2422800 000831a8b66f9b6c2843332d6a902e8d63e1fa5db82041fd333ddae0f6698b66 +2423200 
00028a73f946360c3f666e6136976e8922ab3325476318eae443e5bb67089cdc +2423600 0027da308c8c2cc1b7d119e58a2667547d0ee303cfe2f79cbdf8a9dda929d240 +2424000 00061c81c151c285e03fe2858624ccf0a5b9d1978a34350874e817361fdfcdac +2424400 0017a3a30f6590348b91c38a6ec391de9c13cb165bac6df0a2be1d31d8ce64b5 +2424800 000d5f96aa63c9a43c2f93a5abad4549bc4def6791deee9988959b9c8db98525 +2425200 0016128ab597f53bd775f80b5ffd21a92c941b246c449bd224bcb5bbb43eb1e2 +2425600 0001f32b316f38b44651da105fe3f5cb7ac6f666418cc2f8112815ac41846767 +2426000 001d221d7777f6fa2c8607a98b43ef3a58958a15508a6ca8acaa244e67d8f82f +2426400 0010bc73ac2042cb1eeee66bdecda69d609e033416765efa70150337f91365f4 +2426800 000b9219464787ec2129f70fb3abaed943cf7684e182890789992cb492cfe7ae +2427200 00104b3e83562c2f114e01b277c93084d62794680208e4da6ebd6ea79d82f5fe +2427600 0119c00e0ddf4c0f69c01cd7120664128648bd39e0ed589ffd1605406d46d633 +2428000 000fe605457e5b68313b1822a1b3f76eca5f8f3044acde6870afe1e1be25504a +2428400 0019de074ee2032cece66e85144729274c7cf16a81dc89176ddc9f4617dac926 +2428800 00046f19373ffa9c354dc7a971cc1857495fb547ea808938cf93aec57f6d2120 +2429200 00087c7ee96c3358d4793e4c64b0b86b9c092b8e162192c7e15f2fd73ebb4d50 +2429600 0006481c064325f6890cf9721cf2bc768d37e177dca971d7d80c283e78d150fe diff --git a/zebra-consensus/src/checkpoint/tests.rs b/zebra-consensus/src/checkpoint/tests.rs index 66331310735..9fb29048c40 100644 --- a/zebra-consensus/src/checkpoint/tests.rs +++ b/zebra-consensus/src/checkpoint/tests.rs @@ -254,7 +254,7 @@ async fn continuous_blockchain( // - checkpoints start at genesis // - checkpoints end at the end of the range (there's no point in having extra blocks) let expected_max_height = block::Height((blockchain_len - 1).try_into().unwrap()); - let checkpoint_list = vec![ + let checkpoint_list = [ &blockchain[0], &blockchain[blockchain_len / 3], &blockchain[blockchain_len / 2], @@ -326,7 +326,7 @@ async fn continuous_blockchain( // SPANDOC: Add block directly to the state {?height} ready_state_service - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( block.clone().into(), )) .await diff --git a/zebra-consensus/src/lib.rs b/zebra-consensus/src/lib.rs index cb53cedb9aa..7b8f58f9c90 100644 --- a/zebra-consensus/src/lib.rs +++ b/zebra-consensus/src/lib.rs @@ -41,8 +41,8 @@ mod parameters; mod primitives; mod script; -pub mod chain; pub mod error; +pub mod router; pub mod transaction; pub use block::{ @@ -55,7 +55,6 @@ pub use block::{ }, Request, VerifyBlockError, MAX_BLOCK_SIGOPS, }; -pub use chain::VerifyChainError; pub use checkpoint::{ CheckpointList, VerifyCheckpointError, MAX_CHECKPOINT_BYTE_COUNT, MAX_CHECKPOINT_HEIGHT_GAP, }; @@ -63,6 +62,7 @@ pub use config::Config; pub use error::BlockError; pub use parameters::FundingStreamReceiver; pub use primitives::{ed25519, groth16, halo2, redjubjub, redpallas}; +pub use router::RouterError; /// A boxed [`std::error::Error`]. pub type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;
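The `lib.rs` hunks above rename the public `chain` module to `router` and replace the `VerifyChainError` re-export with `RouterError` (the file rename itself appears at the end of this diff). Downstream code only needs its imports updated; a minimal sketch of the migration, for a hypothetical crate that depends on `zebra-consensus`:

```rust
// Before this change (no longer compiles):
// use zebra_consensus::chain::VerifyChainError;

// After this change:
use zebra_consensus::router::RouterError;

// Illustrative only: code that named the old error type changes the same way,
// assuming `RouterError` implements `Display` like the error it replaces.
fn describe(err: &RouterError) -> String {
    format!("block verification failed: {err}")
}
```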
diff --git a/zebra-consensus/src/primitives.rs b/zebra-consensus/src/primitives.rs index 333ff1156f9..e3ab3a4f865 100644 --- a/zebra-consensus/src/primitives.rs +++ b/zebra-consensus/src/primitives.rs @@ -1,5 +1,9 @@ //! Asynchronous verification of cryptographic primitives. +use tokio::sync::oneshot::error::RecvError; + +use crate::BoxError; + pub mod ed25519; pub mod groth16; pub mod halo2; @@ -11,3 +15,37 @@ const MAX_BATCH_SIZE: usize = 64; /// The maximum latency bound for any of the batch verifiers. const MAX_BATCH_LATENCY: std::time::Duration = std::time::Duration::from_millis(100); + +/// Fires off a task into the Rayon threadpool, awaits the result through a oneshot channel, +/// then converts the error to a [`BoxError`]. +pub async fn spawn_fifo_and_convert< + E: 'static + std::error::Error + Into<BoxError> + Sync + Send, + F: 'static + FnOnce() -> Result<(), E> + Send, +>( + f: F, +) -> Result<(), BoxError> { + spawn_fifo(f) + .await + .map_err(|_| { + "threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?" + })? + .map_err(BoxError::from) +} + +/// Fires off a task into the Rayon threadpool and awaits the result through a oneshot channel. +pub async fn spawn_fifo< + E: 'static + std::error::Error + Sync + Send, + F: 'static + FnOnce() -> Result<(), E> + Send, +>( + f: F, +) -> Result<Result<(), E>, RecvError> { + // Rayon doesn't have a spawn function that returns a value, + // so we use a oneshot channel instead. + let (rsp_tx, rsp_rx) = tokio::sync::oneshot::channel(); + + rayon::spawn_fifo(move || { + let _ = rsp_tx.send(f()); + }); + + rsp_rx.await +}
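These two helpers exist because `rayon::spawn_fifo` cannot return a value to async code: the closure's result has to travel back over a channel, and `spawn_fifo_and_convert` then folds both the channel error and the verification error into one `BoxError`. A self-contained sketch of the same bridge, using only `tokio` and `rayon` (the name `run_on_rayon` is illustrative, not part of this diff):

```rust
use tokio::sync::oneshot;

/// Run a CPU-bound closure on the Rayon pool, in FIFO order,
/// and await its result from async code via a oneshot channel.
async fn run_on_rayon<T, F>(f: F) -> Result<T, oneshot::error::RecvError>
where
    T: Send + 'static,
    F: FnOnce() -> T + Send + 'static,
{
    let (tx, rx) = oneshot::channel();

    rayon::spawn_fifo(move || {
        // If the receiver was dropped (for example, on shutdown),
        // there is no caller left to report to, so ignore the send error.
        let _ = tx.send(f());
    });

    rx.await
}

#[tokio::main]
async fn main() {
    // The CPU-heavy work runs on Rayon threads, not the async runtime.
    let sum = run_on_rayon(|| (0u64..1_000_000).sum::<u64>())
        .await
        .expect("Rayon task dropped the response channel");

    println!("sum = {sum}");
}
```

FIFO spawning matters here: it keeps batches executing roughly in submission order, which is what the `scope_fifo` TODO comments removed below were trying to achieve.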
diff --git a/zebra-consensus/src/primitives/ed25519.rs b/zebra-consensus/src/primitives/ed25519.rs index 22b7f76613c..7a17ac9e14a 100644 --- a/zebra-consensus/src/primitives/ed25519.rs +++ b/zebra-consensus/src/primitives/ed25519.rs @@ -11,13 +11,16 @@ use futures::{future::BoxFuture, FutureExt}; use once_cell::sync::Lazy; use rand::thread_rng; -use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; -use tower_batch::{Batch, BatchControl}; +use tower_batch_control::{Batch, BatchControl}; use tower_fallback::Fallback; use zebra_chain::primitives::ed25519::{batch, *}; +use crate::BoxError; + +use super::{spawn_fifo, spawn_fifo_and_convert}; + #[cfg(test)] mod tests; @@ -43,7 +46,10 @@ pub type Item = batch::Item; /// you should call `.clone()` on the global handle to create a local, mutable /// handle. pub static VERIFIER: Lazy< - Fallback<Batch<Verifier, Item>, ServiceFn<fn(Item) -> BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch<Verifier, Item>, + ServiceFn<fn(Item) -> BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -120,43 +126,22 @@ impl Verifier { /// Flush the batch using a thread pool, and return the result via the channel. /// This function returns a future that becomes ready when the batch is completed. - fn flush_spawning(batch: BatchVerifier, tx: Sender) -> impl Future<Output = ()> { + async fn flush_spawning(batch: BatchVerifier, tx: Sender) { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(|| { - // TODO: - // - spawn batches so rayon executes them in FIFO order - // possible implementation: return a closure in a Future, - // then run it using scope_fifo() in the worker task, - // limiting the number of concurrent batches to the number of rayon threads - rayon::scope_fifo(|s| s.spawn_fifo(|_s| Self::verify(batch, tx))) - }) - .map(|join_result| join_result.expect("panic in ed25519 batch verifier")) + let _ = tx.send(spawn_fifo(move || batch.verify(thread_rng())).await.ok()); } /// Verify a single item using a thread pool, and return the result. - /// This function returns a future that becomes ready when the item is completed. - fn verify_single_spawning(item: Item) -> impl Future<Output = VerifyResult> { + async fn verify_single_spawning(item: Item) -> Result<(), BoxError> { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(|| { - // Rayon doesn't have a spawn function that returns a value, - // so we use a parallel iterator instead. - // - // TODO: - // - when a batch fails, spawn all its individual items into rayon using Vec::par_iter() - // - spawn fallback individual verifications so rayon executes them in FIFO order, - // if possible - rayon::iter::once(item) - .map(|item| item.verify_single()) - .collect() - }) - .map(|join_result| join_result.expect("panic in ed25519 fallback verifier")) + spawn_fifo_and_convert(move || item.verify_single()).await } } impl Service<BatchControl<Item>> for Verifier { type Response = (); - type Error = Error; - type Future = Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'static>>; + type Error = BoxError; + type Future = Pin<Box<dyn Future<Output = Result<(), BoxError>> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) @@ -174,7 +159,8 @@ impl Service<BatchControl<Item>> for Verifier { Ok(()) => { // We use a new channel for each batch, // so we always get the correct batch result here. - let result = rx.borrow().expect("completed batch must send a value"); + let result = rx.borrow() + .ok_or("threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?")?; if result.is_ok() { tracing::trace!(?result, "validated ed25519 signature"); @@ -183,7 +169,7 @@ tracing::trace!(?result, "invalid ed25519 signature"); metrics::counter!("signatures.ed25519.invalid", 1); } - result + result.map_err(BoxError::from) } Err(_recv_error) => panic!("ed25519 verifier was dropped without flushing"), } diff --git a/zebra-consensus/src/primitives/ed25519/tests.rs b/zebra-consensus/src/primitives/ed25519/tests.rs index 4c13a5d6fe1..0847ed08202 100644 --- a/zebra-consensus/src/primitives/ed25519/tests.rs +++ b/zebra-consensus/src/primitives/ed25519/tests.rs @@ -5,7 +5,7 @@ use std::time::Duration; use color_eyre::eyre::{eyre, Result}; use futures::stream::{FuturesUnordered, StreamExt}; use tower::ServiceExt; -use tower_batch::Batch; +use tower_batch_control::Batch; use crate::primitives::ed25519::*;
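With `type Error = BoxError`, the ed25519 service (and the other verifiers below) report errors as one boxed trait object instead of a module-specific error type. A caller that still needs the concrete error can downcast; a minimal sketch, with `SignatureError` as an illustrative stand-in for a concrete error such as `ed25519::Error`:

```rust
use std::{error::Error, fmt};

type BoxError = Box<dyn Error + Send + Sync + 'static>;

// Stand-in for a concrete verifier error type.
#[derive(Debug)]
struct SignatureError;

impl fmt::Display for SignatureError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "invalid signature")
    }
}

impl Error for SignatureError {}

fn handle(err: BoxError) {
    // `downcast_ref` recovers the concrete type when a caller needs it.
    if let Some(sig_err) = err.downcast_ref::<SignatureError>() {
        eprintln!("signature failure: {sig_err}");
    } else {
        eprintln!("other verifier error: {err}");
    }
}

fn main() {
    handle(Box::new(SignatureError));
}
```

The trade-off is a uniform `Service` signature across all five verifiers, at the cost of an explicit downcast when the concrete type matters.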
diff --git a/zebra-consensus/src/primitives/groth16.rs b/zebra-consensus/src/primitives/groth16.rs index 29e325fa0eb..e6d7ad17a35 100644 --- a/zebra-consensus/src/primitives/groth16.rs +++ b/zebra-consensus/src/primitives/groth16.rs @@ -18,11 +18,10 @@ use futures::{future::BoxFuture, FutureExt}; use once_cell::sync::Lazy; use rand::thread_rng; -use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; -use tower_batch::{Batch, BatchControl}; +use tower_batch_control::{Batch, BatchControl}; use tower_fallback::{BoxedError, Fallback}; use zebra_chain::{ @@ -34,6 +33,10 @@ use zebra_chain::{ sprout::{JoinSplit, Nullifier, RandomSeed}, }; +use crate::BoxError; + +use super::{spawn_fifo, spawn_fifo_and_convert}; + mod params; #[cfg(test)] mod tests; @@ -74,7 +77,10 @@ pub type ItemVerifyingKey = PreparedVerifyingKey<Bls12>; /// you should call `.clone()` on the global handle to create a local, mutable /// handle. pub static SPEND_VERIFIER: Lazy< - Fallback<Batch<Verifier, Item>, ServiceFn<fn(Item) -> BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch<Verifier, Item>, + ServiceFn<fn(Item) -> BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -113,7 +119,10 @@ pub static SPEND_VERIFIER: Lazy< /// you should call `.clone()` on the global handle to create a local, mutable /// handle. pub static OUTPUT_VERIFIER: Lazy< - Fallback<Batch<Verifier, Item>, ServiceFn<fn(Item) -> BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch<Verifier, Item>, + ServiceFn<fn(Item) -> BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -417,43 +426,22 @@ impl Verifier { /// Flush the batch using a thread pool, and return the result via the channel. /// This function returns a future that becomes ready when the batch is completed. - fn flush_spawning( - batch: BatchVerifier, - vk: &'static BatchVerifyingKey, - tx: Sender, - ) -> impl Future<Output = ()> { + async fn flush_spawning(batch: BatchVerifier, vk: &'static BatchVerifyingKey, tx: Sender) { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(move || { - // TODO: - // - spawn batches so rayon executes them in FIFO order - // possible implementation: return a closure in a Future, - // then run it using scope_fifo() in the worker task, - // limiting the number of concurrent batches to the number of rayon threads - rayon::scope_fifo(move |s| s.spawn_fifo(move |_s| Self::verify(batch, vk, tx))) - }) - .map(|join_result| join_result.expect("panic in groth16 batch verifier")) + let _ = tx.send( + spawn_fifo(move || batch.verify(thread_rng(), vk)) + .await + .ok(), + ); } /// Verify a single item using a thread pool, and return the result. - /// This function returns a future that becomes ready when the item is completed. - fn verify_single_spawning( + async fn verify_single_spawning( item: Item, pvk: &'static ItemVerifyingKey, - ) -> impl Future<Output = VerifyResult> { + ) -> Result<(), BoxError> { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(move || { - // Rayon doesn't have a spawn function that returns a value, - // so we use a parallel iterator instead. - // - // TODO: - // - when a batch fails, spawn all its individual items into rayon using Vec::par_iter() - // - spawn fallback individual verifications so rayon executes them in FIFO order, - // if possible - rayon::iter::once(item) - .map(move |item| item.verify_single(pvk)) - .collect() - }) - .map(|join_result| join_result.expect("panic in groth16 fallback verifier")) + spawn_fifo_and_convert(move || item.verify_single(pvk)).await } } @@ -470,8 +458,8 @@ impl fmt::Debug for Verifier { impl Service<BatchControl<Item>> for Verifier { type Response = (); - type Error = VerificationError; - type Future = Pin<Box<dyn Future<Output = Result<(), VerificationError>> + Send + 'static>>; + type Error = BoxError; + type Future = Pin<Box<dyn Future<Output = Result<(), BoxError>> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) @@ -492,7 +480,7 @@ impl Service<BatchControl<Item>> for Verifier { let result = rx .borrow() .as_ref() - .expect("completed batch must send a value") + .ok_or("threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?")? 
.clone(); if result.is_ok() { @@ -503,7 +491,7 @@ metrics::counter!("proofs.groth16.invalid", 1); } - result + result.map_err(BoxError::from) } Err(_recv_error) => panic!("verifier was dropped without flushing"), }
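In all of these verifiers, the batch result travels from the Rayon thread back to waiting requests through a `tokio::sync::watch` channel holding an `Option<Result<...>>`: the sender stores `.ok()` of the spawned task's outcome, so `None` means the thread pool dropped the response (typically at shutdown), and receivers now turn that into an error with `ok_or` instead of panicking with the old `expect`. A self-contained sketch of just the channel part of the pattern (`VerifyResult` here is a simplified stand-in):

```rust
use tokio::sync::watch;

// Stand-in for the per-module result; the real code uses `Result<(), Error>`
// with each module's own error type.
type VerifyResult = Result<(), &'static str>;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Each batch gets a fresh channel; the initial `None` means
    // "no result was ever sent".
    let (tx, rx) = watch::channel(None::<VerifyResult>);

    // Sender side (like `flush_spawning`): `.ok()` collapses a dropped
    // response channel into `None` instead of panicking.
    let spawned: Result<VerifyResult, ()> = Ok(Ok(())); // pretend `spawn_fifo` output
    let _ = tx.send(spawned.ok());

    // Receiver side: `ok_or` replaces the old
    // `.expect("completed batch must send a value")`, so a shutdown race
    // propagates as an error through `?` rather than crashing the task.
    let result = rx
        .borrow()
        .ok_or("threadpool dropped the response sender; is the process shutting down?")?;

    result?;
    Ok(())
}
```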
diff --git a/zebra-consensus/src/primitives/halo2.rs b/zebra-consensus/src/primitives/halo2.rs index a4c0d47c911..e9cbc4262e6 100644 --- a/zebra-consensus/src/primitives/halo2.rs +++ b/zebra-consensus/src/primitives/halo2.rs @@ -13,13 +13,16 @@ use once_cell::sync::Lazy; use orchard::circuit::VerifyingKey; use rand::{thread_rng, CryptoRng, RngCore}; -use rayon::prelude::*; use thiserror::Error; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; -use tower_batch::{Batch, BatchControl}; +use tower_batch_control::{Batch, BatchControl}; use tower_fallback::Fallback; +use crate::BoxError; + +use super::{spawn_fifo, spawn_fifo_and_convert}; + #[cfg(test)] mod tests; @@ -199,7 +202,10 @@ impl From<halo2::plonk::Error> for Halo2Error { /// you should call `.clone()` on the global handle to create a local, mutable /// handle. pub static VERIFIER: Lazy< - Fallback<Batch<Verifier, Item>, ServiceFn<fn(Item) -> BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch<Verifier, Item>, + ServiceFn<fn(Item) -> BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -284,43 +290,22 @@ impl Verifier { /// Flush the batch using a thread pool, and return the result via the channel. /// This function returns a future that becomes ready when the batch is completed. - fn flush_spawning( - batch: BatchVerifier, - vk: &'static BatchVerifyingKey, - tx: Sender, - ) -> impl Future<Output = ()> { + async fn flush_spawning(batch: BatchVerifier, vk: &'static BatchVerifyingKey, tx: Sender) { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(move || { - // TODO: - // - spawn batches so rayon executes them in FIFO order - // possible implementation: return a closure in a Future, - // then run it using scope_fifo() in the worker task, - // limiting the number of concurrent batches to the number of rayon threads - rayon::scope_fifo(move |s| s.spawn_fifo(move |_s| Self::verify(batch, vk, tx))) - }) - .map(|join_result| join_result.expect("panic in halo2 batch verifier")) + let _ = tx.send( + spawn_fifo(move || batch.verify(thread_rng(), vk).map_err(Halo2Error::from)) + .await + .ok(), + ); } /// Verify a single item using a thread pool, and return the result. - /// This function returns a future that becomes ready when the item is completed. - fn verify_single_spawning( + async fn verify_single_spawning( item: Item, pvk: &'static ItemVerifyingKey, - ) -> impl Future<Output = VerifyResult> { + ) -> Result<(), BoxError> { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(move || { - // Rayon doesn't have a spawn function that returns a value, - // so we use a parallel iterator instead. - // - // TODO: - // - when a batch fails, spawn all its individual items into rayon using Vec::par_iter() - // - spawn fallback individual verifications so rayon executes them in FIFO order, - // if possible - rayon::iter::once(item) - .map(move |item| item.verify_single(pvk).map_err(Halo2Error::from)) - .collect() - }) - .map(|join_result| join_result.expect("panic in halo2 fallback verifier")) + spawn_fifo_and_convert(move || item.verify_single(pvk).map_err(Halo2Error::from)).await } } @@ -337,8 +322,8 @@ impl fmt::Debug for Verifier { impl Service<BatchControl<Item>> for Verifier { type Response = (); - type Error = Halo2Error; - type Future = Pin<Box<dyn Future<Output = Result<(), Halo2Error>> + Send + 'static>>; + type Error = BoxError; + type Future = Pin<Box<dyn Future<Output = Result<(), BoxError>> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) @@ -358,7 +343,7 @@ let result = rx .borrow() .as_ref() - .expect("completed batch must send a value") + .ok_or("threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?")? .clone(); if result.is_ok() { @@ -369,7 +354,7 @@ metrics::counter!("proofs.halo2.invalid", 1); } - result + result.map_err(BoxError::from) } Err(_recv_error) => panic!("verifier was dropped without flushing"), } diff --git a/zebra-consensus/src/primitives/redjubjub.rs b/zebra-consensus/src/primitives/redjubjub.rs index 1f11e6625d1..94be0cdb5f8 100644 --- a/zebra-consensus/src/primitives/redjubjub.rs +++ b/zebra-consensus/src/primitives/redjubjub.rs @@ -11,14 +11,17 @@ use futures::{future::BoxFuture, FutureExt}; use once_cell::sync::Lazy; use rand::thread_rng; -use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; -use tower_batch::{Batch, BatchControl}; +use tower_batch_control::{Batch, BatchControl}; use tower_fallback::Fallback; use zebra_chain::primitives::redjubjub::{batch, *}; +use crate::BoxError; + +use super::{spawn_fifo, spawn_fifo_and_convert}; + #[cfg(test)] mod tests; @@ -44,7 +47,10 @@ pub type Item = batch::Item; /// you should call `.clone()` on the global handle to create a local, mutable /// handle. pub static VERIFIER: Lazy< - Fallback<Batch<Verifier, Item>, ServiceFn<fn(Item) -> BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch<Verifier, Item>, + ServiceFn<fn(Item) -> BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -121,43 +127,22 @@ impl Verifier { /// Flush the batch using a thread pool, and return the result via the channel. /// This function returns a future that becomes ready when the batch is completed. - fn flush_spawning(batch: BatchVerifier, tx: Sender) -> impl Future<Output = ()> { + async fn flush_spawning(batch: BatchVerifier, tx: Sender) { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(|| { - // TODO: - // - spawn batches so rayon executes them in FIFO order - // possible implementation: return a closure in a Future, - // then run it using scope_fifo() in the worker task, - // limiting the number of concurrent batches to the number of rayon threads - rayon::scope_fifo(|s| s.spawn_fifo(|_s| Self::verify(batch, tx))) - }) - .map(|join_result| join_result.expect("panic in redjubjub batch verifier")) + let _ = tx.send(spawn_fifo(move || batch.verify(thread_rng())).await.ok()); } /// Verify a single item using a thread pool, and return the result. - /// This function returns a future that becomes ready when the item is completed. 
- fn verify_single_spawning(item: Item) -> impl Future { + async fn verify_single_spawning(item: Item) -> Result<(), BoxError> { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(|| { - // Rayon doesn't have a spawn function that returns a value, - // so we use a parallel iterator instead. - // - // TODO: - // - when a batch fails, spawn all its individual items into rayon using Vec::par_iter() - // - spawn fallback individual verifications so rayon executes them in FIFO order, - // if possible - rayon::iter::once(item) - .map(|item| item.verify_single()) - .collect() - }) - .map(|join_result| join_result.expect("panic in redjubjub fallback verifier")) + spawn_fifo_and_convert(move || item.verify_single()).await } } impl Service> for Verifier { type Response = (); - type Error = Error; - type Future = Pin + Send + 'static>>; + type Error = BoxError; + type Future = Pin> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) @@ -175,7 +160,8 @@ impl Service> for Verifier { Ok(()) => { // We use a new channel for each batch, // so we always get the correct batch result here. - let result = rx.borrow().expect("completed batch must send a value"); + let result = rx.borrow() + .ok_or("threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?")?; if result.is_ok() { tracing::trace!(?result, "validated redjubjub signature"); @@ -185,7 +171,7 @@ impl Service> for Verifier { metrics::counter!("signatures.redjubjub.invalid", 1); } - result + result.map_err(BoxError::from) } Err(_recv_error) => panic!("verifier was dropped without flushing"), } diff --git a/zebra-consensus/src/primitives/redjubjub/tests.rs b/zebra-consensus/src/primitives/redjubjub/tests.rs index 8c29e318d65..eb32a1db898 100644 --- a/zebra-consensus/src/primitives/redjubjub/tests.rs +++ b/zebra-consensus/src/primitives/redjubjub/tests.rs @@ -7,7 +7,7 @@ use std::time::Duration; use color_eyre::eyre::{eyre, Result}; use futures::stream::{FuturesUnordered, StreamExt}; use tower::ServiceExt; -use tower_batch::Batch; +use tower_batch_control::Batch; async fn sign_and_verify(mut verifier: V, n: usize) -> Result<(), V::Error> where diff --git a/zebra-consensus/src/primitives/redpallas.rs b/zebra-consensus/src/primitives/redpallas.rs index 26f40b61ad9..5064fa817fb 100644 --- a/zebra-consensus/src/primitives/redpallas.rs +++ b/zebra-consensus/src/primitives/redpallas.rs @@ -11,14 +11,17 @@ use futures::{future::BoxFuture, FutureExt}; use once_cell::sync::Lazy; use rand::thread_rng; -use rayon::prelude::*; use tokio::sync::watch; use tower::{util::ServiceFn, Service}; -use tower_batch::{Batch, BatchControl}; +use tower_batch_control::{Batch, BatchControl}; use tower_fallback::Fallback; use zebra_chain::primitives::reddsa::{batch, orchard, Error}; +use crate::BoxError; + +use super::{spawn_fifo, spawn_fifo_and_convert}; + #[cfg(test)] mod tests; @@ -44,7 +47,10 @@ pub type Item = batch::Item; /// you should call `.clone()` on the global handle to create a local, mutable /// handle. pub static VERIFIER: Lazy< - Fallback, ServiceFn BoxFuture<'static, VerifyResult>>>, + Fallback< + Batch, + ServiceFn BoxFuture<'static, Result<(), BoxError>>>, + >, > = Lazy::new(|| { Fallback::new( Batch::new( @@ -121,43 +127,22 @@ impl Verifier { /// Flush the batch using a thread pool, and return the result via the channel. /// This function returns a future that becomes ready when the batch is completed. 
- fn flush_spawning(batch: BatchVerifier, tx: Sender) -> impl Future { + async fn flush_spawning(batch: BatchVerifier, tx: Sender) { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(|| { - // TODO: - // - spawn batches so rayon executes them in FIFO order - // possible implementation: return a closure in a Future, - // then run it using scope_fifo() in the worker task, - // limiting the number of concurrent batches to the number of rayon threads - rayon::scope_fifo(|s| s.spawn_fifo(|_s| Self::verify(batch, tx))) - }) - .map(|join_result| join_result.expect("panic in ed25519 batch verifier")) + let _ = tx.send(spawn_fifo(move || batch.verify(thread_rng())).await.ok()); } /// Verify a single item using a thread pool, and return the result. - /// This function returns a future that becomes ready when the item is completed. - fn verify_single_spawning(item: Item) -> impl Future { + async fn verify_single_spawning(item: Item) -> Result<(), BoxError> { // Correctness: Do CPU-intensive work on a dedicated thread, to avoid blocking other futures. - tokio::task::spawn_blocking(|| { - // Rayon doesn't have a spawn function that returns a value, - // so we use a parallel iterator instead. - // - // TODO: - // - when a batch fails, spawn all its individual items into rayon using Vec::par_iter() - // - spawn fallback individual verifications so rayon executes them in FIFO order, - // if possible - rayon::iter::once(item) - .map(|item| item.verify_single()) - .collect() - }) - .map(|join_result| join_result.expect("panic in redpallas fallback verifier")) + spawn_fifo_and_convert(move || item.verify_single()).await } } impl Service> for Verifier { type Response = (); - type Error = Error; - type Future = Pin + Send + 'static>>; + type Error = BoxError; + type Future = Pin> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) @@ -174,7 +159,8 @@ impl Service> for Verifier { Ok(()) => { // We use a new channel for each batch, // so we always get the correct batch result here. - let result = rx.borrow().expect("completed batch must send a value"); + let result = rx.borrow() + .ok_or("threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?")?; if result.is_ok() { tracing::trace!(?result, "validated redpallas signature"); @@ -184,7 +170,7 @@ impl Service> for Verifier { metrics::counter!("signatures.redpallas.invalid", 1); } - result + result.map_err(BoxError::from) } Err(_recv_error) => panic!("verifier was dropped without flushing"), } diff --git a/zebra-consensus/src/primitives/redpallas/tests.rs b/zebra-consensus/src/primitives/redpallas/tests.rs index 2a49b9a1dff..6ae0717d627 100644 --- a/zebra-consensus/src/primitives/redpallas/tests.rs +++ b/zebra-consensus/src/primitives/redpallas/tests.rs @@ -7,7 +7,7 @@ use std::time::Duration; use color_eyre::eyre::{eyre, Result}; use futures::stream::{FuturesUnordered, StreamExt}; use tower::ServiceExt; -use tower_batch::Batch; +use tower_batch_control::Batch; use zebra_chain::primitives::reddsa::{ orchard::{Binding, SpendAuth}, diff --git a/zebra-consensus/src/chain.rs b/zebra-consensus/src/router.rs similarity index 91% rename from zebra-consensus/src/chain.rs rename to zebra-consensus/src/router.rs index 28f490cea31..28fac00c03f 100644 --- a/zebra-consensus/src/chain.rs +++ b/zebra-consensus/src/router.rs @@ -1,6 +1,6 @@ //! Top-level semantic block verification for Zebra. //! -//! 
Verifies blocks using the [`CheckpointVerifier`] or full [`BlockVerifier`], +//! Verifies blocks using the [`CheckpointVerifier`] or full [`SemanticBlockVerifier`], //! depending on the config and block height. //! //! # Correctness @@ -33,7 +33,7 @@ use zebra_chain::{ use zebra_state as zs; use crate::{ - block::{BlockVerifier, Request, VerifyBlockError}, + block::{Request, SemanticBlockVerifier, VerifyBlockError}, checkpoint::{CheckpointList, CheckpointVerifier, VerifyCheckpointError}, error::TransactionError, transaction, BoxError, Config, @@ -56,15 +56,15 @@ mod tests; /// memory, but missing slots can significantly slow down Zebra. const VERIFIER_BUFFER_BOUND: usize = 5; -/// The chain verifier routes requests to either the checkpoint verifier or the -/// block verifier, depending on the maximum checkpoint height. +/// The block verifier router routes requests to either the checkpoint verifier or the +/// semantic block verifier, depending on the maximum checkpoint height. /// /// # Correctness /// /// Block verification requests should be wrapped in a timeout, so that -/// out-of-order and invalid requests do not hang indefinitely. See the [`chain`](`crate::chain`) +/// out-of-order and invalid requests do not hang indefinitely. See the [`router`](`crate::router`) /// module documentation for details. -struct ChainVerifier +struct BlockVerifierRouter where S: Service + Send + Clone + 'static, S::Future: Send + 'static, @@ -84,8 +84,8 @@ where /// This height must be in the `checkpoint` verifier's checkpoint list. max_checkpoint_height: block::Height, - /// The full block verifier, used for blocks after `max_checkpoint_height`. - block: BlockVerifier, + /// The full semantic block verifier, used for blocks after `max_checkpoint_height`. + block: SemanticBlockVerifier, } /// An error while semantically verifying a block. @@ -93,41 +93,41 @@ where // One or both of these error variants are at least 140 bytes #[derive(Debug, Display, Error)] #[allow(missing_docs)] -pub enum VerifyChainError { +pub enum RouterError { /// Block could not be checkpointed Checkpoint { source: Box }, /// Block could not be full-verified Block { source: Box }, } -impl From for VerifyChainError { +impl From for RouterError { fn from(err: VerifyCheckpointError) -> Self { - VerifyChainError::Checkpoint { + RouterError::Checkpoint { source: Box::new(err), } } } -impl From for VerifyChainError { +impl From for RouterError { fn from(err: VerifyBlockError) -> Self { - VerifyChainError::Block { + RouterError::Block { source: Box::new(err), } } } -impl VerifyChainError { +impl RouterError { /// Returns `true` if this is definitely a duplicate request. /// Some duplicate requests might not be detected, and therefore return `false`. pub fn is_duplicate_request(&self) -> bool { match self { - VerifyChainError::Checkpoint { source, .. } => source.is_duplicate_request(), - VerifyChainError::Block { source, .. } => source.is_duplicate_request(), + RouterError::Checkpoint { source, .. } => source.is_duplicate_request(), + RouterError::Block { source, .. 
} => source.is_duplicate_request(), } } } -impl Service for ChainVerifier +impl Service for BlockVerifierRouter where S: Service + Send + Clone + 'static, S::Future: Send + 'static, @@ -138,7 +138,7 @@ where V::Future: Send + 'static, { type Response = block::Hash; - type Error = VerifyChainError; + type Error = RouterError; type Future = Pin> + Send + 'static>>; @@ -224,7 +224,7 @@ where /// /// Block and transaction verification requests should be wrapped in a timeout, /// so that out-of-order and invalid requests do not hang indefinitely. -/// See the [`chain`](`crate::chain`) module documentation for details. +/// See the [`router`](`crate::router`) module documentation for details. #[instrument(skip(state_service))] pub async fn init( config: Config, @@ -232,7 +232,7 @@ pub async fn init( mut state_service: S, debug_skip_parameter_preload: bool, ) -> ( - Buffer, Request>, + Buffer, Request>, Buffer< BoxService, transaction::Request, @@ -364,24 +364,28 @@ where zs::Response::Tip(tip) => tip, _ => unreachable!("wrong response to Request::Tip"), }; - tracing::info!(?tip, ?max_checkpoint_height, "initializing chain verifier"); + tracing::info!( + ?tip, + ?max_checkpoint_height, + "initializing block verifier router" + ); - let block = BlockVerifier::new(network, state_service.clone(), transaction.clone()); + let block = SemanticBlockVerifier::new(network, state_service.clone(), transaction.clone()); let checkpoint = CheckpointVerifier::from_checkpoint_list(list, network, tip, state_service); - let chain = ChainVerifier { + let router = BlockVerifierRouter { checkpoint, max_checkpoint_height, block, }; - let chain = Buffer::new(BoxService::new(chain), VERIFIER_BUFFER_BOUND); + let router = Buffer::new(BoxService::new(router), VERIFIER_BUFFER_BOUND); let task_handles = BackgroundTaskHandles { groth16_download_handle, state_checkpoint_verify_handle, }; - (chain, transaction, task_handles, max_checkpoint_height) + (router, transaction, task_handles, max_checkpoint_height) } /// Parses the checkpoint list for `network` and `config`. diff --git a/zebra-consensus/src/chain/tests.rs b/zebra-consensus/src/router/tests.rs similarity index 82% rename from zebra-consensus/src/chain/tests.rs rename to zebra-consensus/src/router/tests.rs index 308be754bdb..eb2abf1b2a3 100644 --- a/zebra-consensus/src/chain/tests.rs +++ b/zebra-consensus/src/router/tests.rs @@ -66,14 +66,18 @@ async fn verifiers_from_network( + 'static, ) { let state_service = zs::init_test(network); - let (chain_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = - crate::chain::init(Config::default(), network, state_service.clone(), true).await; + let ( + block_verifier_router, + _transaction_verifier, + _groth16_download_handle, + _max_checkpoint_height, + ) = crate::router::init(Config::default(), network, state_service.clone(), true).await; // We can drop the download task handle here, because: // - if the download task fails, the tests will panic, and // - if the download task hangs, the tests will hang. - (chain_verifier, state_service) + (block_verifier_router, state_service) } static BLOCK_VERIFY_TRANSCRIPT_GENESIS: Lazy< @@ -165,15 +169,19 @@ async fn verify_checkpoint(config: Config) -> Result<(), Report> { // init_from_verifiers. // // Download task panics and timeouts are propagated to the tests that use Groth16 verifiers. 
- let (chain_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = - super::init(config.clone(), network, zs::init_test(network), true).await; + let ( + block_verifier_router, + _transaction_verifier, + _groth16_download_handle, + _max_checkpoint_height, + ) = super::init(config.clone(), network, zs::init_test(network), true).await; // Add a timeout layer - let chain_verifier = - TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier); + let block_verifier_router = + TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(block_verifier_router); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); - transcript.check(chain_verifier).await.unwrap(); + transcript.check(block_verifier_router).await.unwrap(); Ok(()) } @@ -183,22 +191,22 @@ async fn verify_fail_no_coinbase_test() -> Result<(), Report> { verify_fail_no_coinbase().await } -/// Test that blocks with no coinbase height are rejected by the ChainVerifier +/// Test that blocks with no coinbase height are rejected by the BlockVerifierRouter /// -/// ChainVerifier uses the block height to decide between the CheckpointVerifier -/// and BlockVerifier. This is the error case, where there is no height. +/// BlockVerifierRouter uses the block height to decide between the CheckpointVerifier +/// and SemanticBlockVerifier. This is the error case, where there is no height. #[spandoc::spandoc] async fn verify_fail_no_coinbase() -> Result<(), Report> { let _init_guard = zebra_test::init(); - let (chain_verifier, state_service) = verifiers_from_network(Network::Mainnet).await; + let (router, state_service) = verifiers_from_network(Network::Mainnet).await; // Add a timeout layer - let chain_verifier = - TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier); + let block_verifier_router = + TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(router); let transcript = Transcript::from(NO_COINBASE_TRANSCRIPT.iter().cloned()); - transcript.check(chain_verifier).await.unwrap(); + transcript.check(block_verifier_router).await.unwrap(); let transcript = Transcript::from(NO_COINBASE_STATE_TRANSCRIPT.iter().cloned()); transcript.check(state_service).await.unwrap(); @@ -216,14 +224,14 @@ async fn round_trip_checkpoint_test() -> Result<(), Report> { async fn round_trip_checkpoint() -> Result<(), Report> { let _init_guard = zebra_test::init(); - let (chain_verifier, state_service) = verifiers_from_network(Network::Mainnet).await; + let (block_verifier_router, state_service) = verifiers_from_network(Network::Mainnet).await; // Add a timeout layer - let chain_verifier = - TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier); + let block_verifier_router = + TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(block_verifier_router); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); - transcript.check(chain_verifier).await.unwrap(); + transcript.check(block_verifier_router).await.unwrap(); let transcript = Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); transcript.check(state_service).await.unwrap(); @@ -241,20 +249,26 @@ async fn verify_fail_add_block_checkpoint_test() -> Result<(), Report> { async fn verify_fail_add_block_checkpoint() -> Result<(), Report> { let _init_guard = zebra_test::init(); - let (chain_verifier, state_service) = verifiers_from_network(Network::Mainnet).await; + let (block_verifier_router, 
state_service) = verifiers_from_network(Network::Mainnet).await; // Add a timeout layer - let chain_verifier = - TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(chain_verifier); + let block_verifier_router = + TimeoutLayer::new(Duration::from_secs(VERIFY_TIMEOUT_SECONDS)).layer(block_verifier_router); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); - transcript.check(chain_verifier.clone()).await.unwrap(); + transcript + .check(block_verifier_router.clone()) + .await + .unwrap(); let transcript = Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); transcript.check(state_service.clone()).await.unwrap(); let transcript = Transcript::from(BLOCK_VERIFY_TRANSCRIPT_GENESIS_FAIL.iter().cloned()); - transcript.check(chain_verifier.clone()).await.unwrap(); + transcript + .check(block_verifier_router.clone()) + .await + .unwrap(); let transcript = Transcript::from(STATE_VERIFY_TRANSCRIPT_GENESIS.iter().cloned()); transcript.check(state_service.clone()).await.unwrap(); diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index 28ecc7e0394..90c549a3b00 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -58,7 +58,7 @@ const UTXO_LOOKUP_TIMEOUT: std::time::Duration = std::time::Duration::from_secs( /// # Correctness /// /// Transaction verification requests should be wrapped in a timeout, so that -/// out-of-order and invalid requests do not hang indefinitely. See the [`chain`](`crate::chain`) +/// out-of-order and invalid requests do not hang indefinitely. See the [`router`](`crate::router`) /// module documentation for details. #[derive(Debug, Clone)] pub struct Verifier { @@ -740,10 +740,6 @@ where orchard_shielded_data, &shielded_sighash, )?)) - - // TODO: - // - verify orchard shielded pool (ZIP-224) (#2105) - // - shielded input and output limits? (#2379) } /// Verifies if a V5 `transaction` is supported by `network_upgrade`. 
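The groth16, halo2, redjubjub, and redpallas verifier diffs above call `spawn_fifo` and `spawn_fifo_and_convert` from the parent `primitives` module, but those helpers are defined outside this diff. A minimal sketch of how such helpers could work, assuming rayon's global `spawn_fifo` and a tokio oneshot channel; the names and error message match the diff, but the bodies are illustrative rather than Zebra's actual implementation:

    use tokio::sync::oneshot;

    use crate::BoxError;

    // Sketch: run a CPU-bound closure on rayon's global FIFO thread pool,
    // and await its result from async code via a oneshot channel.
    async fn spawn_fifo<E, F>(f: F) -> Result<Result<(), E>, oneshot::error::RecvError>
    where
        E: Send + 'static,
        F: FnOnce() -> Result<(), E> + Send + 'static,
    {
        let (tx, rx) = oneshot::channel();
        rayon::spawn_fifo(move || {
            // The receiver is only dropped on shutdown, so send errors are ignored.
            let _ = tx.send(f());
        });
        rx.await
    }

    // Sketch: like `spawn_fifo`, but flatten both the channel error and the
    // verification error into a `BoxError`, matching the new `Service::Error` type.
    async fn spawn_fifo_and_convert<E, F>(f: F) -> Result<(), BoxError>
    where
        E: Into<BoxError> + Send + 'static,
        F: FnOnce() -> Result<(), E> + Send + 'static,
    {
        spawn_fifo(f)
            .await
            .map_err(|_| {
                "threadpool unexpectedly dropped response channel sender. Is Zebra shutting down?"
            })?
            .map_err(Into::into)
    }

Keeping the inner verification `Result` separate from the channel error lets the batch path (`flush_spawning`) forward `.ok()` of the whole value into its watch channel, while the single-item fallback path converts both error layers into the service's `BoxError`, as the diffs above do.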
diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index 7e6f8fe331e..87ce0f7e3bf 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -782,7 +782,9 @@ async fn state_error_converted_correctly() { "expected matching state and transaction errors" ); - let TransactionError::ValidateContextError(propagated_validate_context_error) = transaction_error else { + let TransactionError::ValidateContextError(propagated_validate_context_error) = + transaction_error + else { panic!("should be a ValidateContextError variant"); }; @@ -866,8 +868,7 @@ async fn v5_transaction_is_rejected_before_nu5_activation() { let verifier = Verifier::new(network, state_service); let transaction = fake_v5_transactions_for_network(network, blocks) - .rev() - .next() + .next_back() .expect("At least one fake V5 transaction in the test vectors"); let result = verifier @@ -918,8 +919,7 @@ fn v5_transaction_is_accepted_after_nu5_activation_for_network(network: Network) let verifier = Verifier::new(network, state_service); let mut transaction = fake_v5_transactions_for_network(network, blocks) - .rev() - .next() + .next_back() .expect("At least one fake V5 transaction in the test vectors"); if transaction .expiry_height() @@ -2155,7 +2155,7 @@ async fn v4_with_joinsplit_is_rejected_for_modification( }) .await; - if result == expected_error || i >= 10 { + if result == expected_error || i >= 100 { break result; } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index d5b3c9516e6..268c7c39352 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,11 +1,28 @@ [package] name = "zebra-network" -version = "1.0.0-beta.24" -authors = ["Zcash Foundation "] -license = "MIT OR Apache-2.0" +version = "1.0.0-beta.28" +authors = ["Zcash Foundation ", "Tower Maintainers "] +description = "Networking code for Zebra" +# # Legal +# +# This licence is deliberately different to the rest of Zebra. 
+# +# Some code in: +# zebra-network/src/peer_set/set.rs +# zebra-network/src/peer_set/unready_service.rs +# zebra-network/src/peer_set/initialize.rs +# was modified from a 2019 version of: +# https://github.com/tower-rs/tower/tree/master/tower/src/balance/p2c/service.rs +license = "MIT" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["asynchronous", "cryptography::cryptocurrencies", "encoding", "network-programming"] [features] default = [] @@ -23,29 +40,33 @@ progress-bar = [ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] [dependencies] -bitflags = "2.2.1" +bitflags = "2.3.3" byteorder = "1.4.3" bytes = "1.4.0" -chrono = { version = "0.4.24", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } +dirs = "5.0.1" hex = "0.4.3" humantime-serde = "1.1.1" -indexmap = { version = "1.9.3", features = ["serde"] } +indexmap = { version = "2.0.0", features = ["serde"] } +itertools = "0.11.0" lazy_static = "1.4.0" +num-integer = "0.1.45" ordered-map = "0.4.2" -pin-project = "1.1.0" -rand = { version = "0.8.5", package = "rand" } +pin-project = "1.1.3" +rand = "0.8.5" rayon = "1.7.0" -regex = "1.8.1" -serde = { version = "1.0.163", features = ["serde_derive"] } -thiserror = "1.0.40" +regex = "1.9.3" +serde = { version = "1.0.179", features = ["serde_derive"] } +tempfile = "3.7.1" +thiserror = "1.0.44" futures = "0.3.28" -tokio = { version = "1.28.0", features = ["net", "time", "tracing", "macros", "rt-multi-thread"] } +tokio = { version = "1.29.1", features = ["fs", "io-util", "net", "time", "tracing", "macros", "rt-multi-thread"] } tokio-stream = { version = "0.1.14", features = ["sync", "time"] } tokio-util = { version = "0.7.8", features = ["codec"] } tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } -metrics = "0.21.0" +metrics = "0.21.1" tracing-futures = "0.2.5" tracing-error = { version = "0.2.0", features = ["traced-error"] } tracing = "0.1.37" @@ -59,18 +80,18 @@ howudoin = { version = "0.1.2", optional = true } # tor-rtcompat = { version = "0.0.2", optional = true } # proptest dependencies -proptest = { version = "1.1.0", optional = true } +proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } -zebra-chain = { path = "../zebra-chain" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28", features = ["async-error"] } [dev-dependencies] -proptest = "1.1.0" +proptest = "1.2.0" proptest-derive = "0.3.0" static_assertions = "1.1.0" -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } -toml = "0.7.4" +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } +toml = "0.7.6" zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-test = { path = "../zebra-test/" } diff --git a/zebra-network/LICENSE b/zebra-network/LICENSE new file mode 100644 index 00000000000..9862976a6ce --- /dev/null +++ b/zebra-network/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2019-2023 Zcash Foundation +Copyright (c) 2019 Tower Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a 
copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/zebra-network/src/address_book.rs b/zebra-network/src/address_book.rs index 0220928071a..de05b39bd3a 100644 --- a/zebra-network/src/address_book.rs +++ b/zebra-network/src/address_book.rs @@ -3,8 +3,9 @@ use std::{ cmp::Reverse, + collections::HashMap, iter::Extend, - net::SocketAddr, + net::{IpAddr, SocketAddr}, sync::{Arc, Mutex}, time::Instant, }; @@ -14,7 +15,7 @@ use ordered_map::OrderedMap; use tokio::sync::watch; use tracing::Span; -use zebra_chain::parameters::Network; +use zebra_chain::{parameters::Network, serialization::DateTime32}; use crate::{ constants, @@ -72,6 +73,14 @@ pub struct AddressBook { /// [`OrderedMap`] sorts in descending order. by_addr: OrderedMap>, + /// The address with a last_connection_state of [`PeerAddrState::Responded`] and + /// the most recent `last_response` time by IP. + /// + /// This is used to avoid initiating outbound connections past [`Config::max_connections_per_ip`](crate::config::Config), and + /// currently only supports a `max_connections_per_ip` of 1, and must be `None` when used with a greater `max_connections_per_ip`. + // TODO: Replace with `by_ip: HashMap>` to support configured `max_connections_per_ip` greater than 1 + most_recent_by_ip: Option>, + /// The local listener address. local_listener: SocketAddr, @@ -130,7 +139,12 @@ impl AddressBook { /// Construct an [`AddressBook`] with the given `local_listener` on `network`. /// /// Uses the supplied [`tracing::Span`] for address book operations. - pub fn new(local_listener: SocketAddr, network: Network, span: Span) -> AddressBook { + pub fn new( + local_listener: SocketAddr, + network: Network, + max_connections_per_ip: usize, + span: Span, + ) -> AddressBook { let constructor_span = span.clone(); let _guard = constructor_span.enter(); @@ -141,6 +155,8 @@ impl AddressBook { // and it gets replaced by `update_metrics` anyway. let (address_metrics_tx, _address_metrics_rx) = watch::channel(AddressMetrics::default()); + // Avoid initiating outbound handshakes when max_connections_per_ip is 1. 
+ let should_limit_outbound_conns_per_ip = max_connections_per_ip == 1; let mut new_book = AddressBook { by_addr: OrderedMap::new(|meta_addr| Reverse(*meta_addr)), local_listener: canonical_socket_addr(local_listener), @@ -149,6 +165,7 @@ impl AddressBook { span, address_metrics_tx, last_address_log: None, + most_recent_by_ip: should_limit_outbound_conns_per_ip.then(HashMap::new), }; new_book.update_metrics(instant_now, chrono_now); @@ -170,6 +187,7 @@ impl AddressBook { pub fn new_with_addrs( local_listener: SocketAddr, network: Network, + max_connections_per_ip: usize, addr_limit: usize, span: Span, addrs: impl IntoIterator, @@ -183,7 +201,7 @@ impl AddressBook { // The maximum number of addresses should be always greater than 0 assert!(addr_limit > 0); - let mut new_book = AddressBook::new(local_listener, network, span); + let mut new_book = AddressBook::new(local_listener, network, max_connections_per_ip, span); new_book.addr_limit = addr_limit; let addrs = addrs @@ -198,6 +216,14 @@ impl AddressBook { for (socket_addr, meta_addr) in addrs { // overwrite any duplicate addresses new_book.by_addr.insert(socket_addr, meta_addr); + // Add the address to `most_recent_by_ip` if it has responded + if new_book.should_update_most_recent_by_ip(meta_addr) { + new_book + .most_recent_by_ip + .as_mut() + .expect("should be some when should_update_most_recent_by_ip is true") + .insert(socket_addr.ip(), meta_addr); + } // exit as soon as we get enough addresses if new_book.by_addr.len() >= addr_limit { break; @@ -228,10 +254,11 @@ impl AddressBook { /// Get the local listener address. /// /// This address contains minimal state, but it is not sanitized. - pub fn local_listener_meta_addr(&self) -> MetaAddr { + pub fn local_listener_meta_addr(&self, now: chrono::DateTime) -> MetaAddr { + let now: DateTime32 = now.try_into().expect("will succeed until 2038"); + MetaAddr::new_local_listener_change(self.local_listener) - .into_new_meta_addr() - .expect("unexpected invalid new local listener addr") + .local_listener_into_new_meta_addr(now) } /// Get the local listener [`SocketAddr`]. @@ -239,7 +266,8 @@ impl AddressBook { self.local_listener } - /// Get the contents of `self` in random order with sanitized timestamps. + /// Get the active addresses in `self` in random order with sanitized timestamps, + /// including our local listener address. pub fn sanitized(&self, now: chrono::DateTime) -> Vec { use rand::seq::SliceRandom; let _guard = self.span.enter(); @@ -249,14 +277,16 @@ impl AddressBook { // Unconditionally add our local listener address to the advertised peers, // to replace any self-connection failures. The address book and change // constructors make sure that the SocketAddr is canonical. - let local_listener = self.local_listener_meta_addr(); + let local_listener = self.local_listener_meta_addr(now); peers.insert(local_listener.addr, local_listener); // Then sanitize and shuffle - let mut peers = peers + let mut peers: Vec = peers .descending_values() .filter_map(|meta_addr| meta_addr.sanitize(self.network)) - // Security: remove peers that: + // # Security + // + // Remove peers that: // - last responded more than three hours ago, or // - haven't responded yet but were reported last seen more than three hours ago // @@ -264,9 +294,34 @@ impl AddressBook { // nodes impacts the network health, because connection attempts end up being wasted on // peers that are less likely to respond. 
.filter(|addr| addr.is_active_for_gossip(now))
-            .collect::<Vec<_>>();
+            .collect();
+
+        peers.shuffle(&mut rand::thread_rng());
+
+        peers
+    }
+
+    /// Get the active addresses in `self`, in preferred caching order,
+    /// excluding our local listener address.
+    pub fn cacheable(&self, now: chrono::DateTime<Utc>) -> Vec<MetaAddr> {
+        let _guard = self.span.enter();
+
+        let peers = self.by_addr.clone();
+
+        // Get peers in preferred order, then keep the recently active ones
+        peers
+            .descending_values()
+            // # Security
+            //
+            // Remove peers that:
+            // - last responded more than three hours ago, or
+            // - haven't responded yet but were reported last seen more than three hours ago
+            //
+            // This prevents Zebra from caching nodes that are likely unreachable,
+            // which improves startup time and reliability.
+            .filter(|addr| addr.is_active_for_gossip(now))
+            .cloned()
+            .collect()
     }

     /// Look up `addr` in the address book, and return its [`MetaAddr`].
@@ -285,6 +340,45 @@ impl AddressBook {
         meta_addr
     }

+    /// Returns true if `updated` needs to be applied to the recent outbound peer connection IP cache.
+    ///
+    /// Checks if there are no existing entries in the address book with this IP,
+    /// or if `updated` has a more recent `last_response` requiring the outbound connector to wait
+    /// longer before initiating handshakes with peers at this IP.
+    ///
+    /// This code only needs to check a single cache entry, rather than the entire address book,
+    /// because other code maintains these invariants:
+    /// - `last_response` times for an entry can only increase.
+    /// - this is the only field checked by `has_connection_recently_responded()`
+    ///
+    /// See [`AddressBook::is_ready_for_connection_attempt_with_ip`] for more details.
+    fn should_update_most_recent_by_ip(&self, updated: MetaAddr) -> bool {
+        let Some(most_recent_by_ip) = self.most_recent_by_ip.as_ref() else {
+            return false
+        };
+
+        if let Some(previous) = most_recent_by_ip.get(&updated.addr.ip()) {
+            updated.last_connection_state == PeerAddrState::Responded
+                && updated.last_response() > previous.last_response()
+        } else {
+            updated.last_connection_state == PeerAddrState::Responded
+        }
+    }
+
+    /// Returns true if `addr` is the latest entry for its IP, which is stored in `most_recent_by_ip`.
+    /// The entry is checked for an exact match to the IP and port of `addr`.
+    fn should_remove_most_recent_by_ip(&self, addr: PeerSocketAddr) -> bool {
+        let Some(most_recent_by_ip) = self.most_recent_by_ip.as_ref() else {
+            return false
+        };
+
+        if let Some(previous) = most_recent_by_ip.get(&addr.ip()) {
+            previous.addr == addr
+        } else {
+            false
+        }
+    }
+
     /// Apply `change` to the address book, returning the updated `MetaAddr`,
     /// if the change was valid.
     ///
@@ -313,7 +407,7 @@ impl AddressBook {
         let instant_now = Instant::now();
         let chrono_now = Utc::now();

-        let updated = change.apply_to_meta_addr(previous);
+        let updated = change.apply_to_meta_addr(previous, instant_now, chrono_now);

         trace!(
             ?change,
@@ -344,6 +438,15 @@ impl AddressBook {

         self.by_addr.insert(updated.addr, updated);
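The `most_recent_by_ip` bookkeeping here, and the matching removal logic just below, follows a keep-the-most-recent-entry-per-key pattern. A small self-contained sketch of that idea, using a hypothetical simplified entry type rather than Zebra's `MetaAddr`:

    use std::collections::HashMap;
    use std::net::IpAddr;

    // Hypothetical, simplified peer entry: just an IP and a response time.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Entry {
        ip: IpAddr,
        last_response: u64,
    }

    // Keep only the most recently responding entry per IP, mirroring how the
    // address book updates `most_recent_by_ip` when a peer responds.
    fn update_most_recent(cache: &mut HashMap<IpAddr, Entry>, updated: Entry) {
        match cache.get(&updated.ip) {
            // An existing entry is newer or equally recent: keep it.
            Some(previous) if previous.last_response >= updated.last_response => {}
            // No entry for this IP, or the update is more recent: replace it.
            _ => {
                cache.insert(updated.ip, updated);
            }
        }
    }

Because `last_response` times only increase, checking the single cached entry per IP is enough; the rest of the address book never holds a more recent responder for that IP.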
+        // Add the address to `most_recent_by_ip` if it sent the most recent
+        // response Zebra has received from this IP.
+        if self.should_update_most_recent_by_ip(updated) {
+            self.most_recent_by_ip
+                .as_mut()
+                .expect("should be some when should_update_most_recent_by_ip is true")
+                .insert(updated.addr.ip(), updated);
+        }
+
         debug!(
             ?change,
             ?updated,
@@ -368,6 +471,15 @@

                 self.by_addr.remove(&surplus_peer.addr);

+                // If this surplus peer's address is also the `most_recent_by_ip` entry
+                // for its IP, remove that entry as well.
+                if self.should_remove_most_recent_by_ip(surplus_peer.addr) {
+                    self.most_recent_by_ip
+                        .as_mut()
+                        .expect("should be some when should_remove_most_recent_by_ip is true")
+                        .remove(&surplus_peer.addr.ip());
+                }
+
                 debug!(
                     surplus = ?surplus_peer,
                     ?updated,
@@ -406,6 +518,14 @@
         );

         if let Some(entry) = self.by_addr.remove(&removed_addr) {
+            // If this removed peer's address is also the `most_recent_by_ip` entry
+            // for its IP, remove that entry as well.
+            if self.should_remove_most_recent_by_ip(entry.addr) {
+                if let Some(most_recent_by_ip) = self.most_recent_by_ip.as_mut() {
+                    most_recent_by_ip.remove(&entry.addr.ip());
+                }
+            }
+
             std::mem::drop(_guard);
             self.update_metrics(instant_now, chrono_now);
             Some(entry)
@@ -434,6 +554,26 @@
         self.by_addr.descending_values().cloned()
     }

+    /// Is this IP ready for a new outbound connection attempt?
+    /// Checks if the outbound connection with the most recent response at this IP has recently responded.
+    ///
+    /// Note: last_response times may remain live for a long time if the local clock is changed to an earlier time.
+    fn is_ready_for_connection_attempt_with_ip(
+        &self,
+        ip: &IpAddr,
+        chrono_now: chrono::DateTime<Utc>,
+    ) -> bool {
+        let Some(most_recent_by_ip) = self.most_recent_by_ip.as_ref() else {
+            // If we're not checking IPs, any connection is allowed
+            return true;
+        };
+        let Some(same_ip_peer) = most_recent_by_ip.get(ip) else {
+            // If there's no entry for this IP, any connection is allowed
+            return true;
+        };
+        !same_ip_peer.has_connection_recently_responded(chrono_now)
+    }
+
     /// Return an iterator over peers that are due for a reconnection attempt,
     /// in reconnection attempt order.
     pub fn reconnection_peers(
@@ -449,6 +589,7 @@
             .descending_values()
             .filter(move |peer| {
                 peer.is_ready_for_connection_attempt(instant_now, chrono_now, self.network)
+                    && self.is_ready_for_connection_attempt_with_ip(&peer.addr.ip(), chrono_now)
             })
             .cloned()
     }
@@ -670,6 +811,7 @@ impl Clone for AddressBook {
             span: self.span.clone(),
             address_metrics_tx,
             last_address_log: None,
+            most_recent_by_ip: self.most_recent_by_ip.clone(),
         }
     }
 }
diff --git a/zebra-network/src/address_book/tests/prop.rs b/zebra-network/src/address_book/tests/prop.rs
index 9c497ad3692..732d477379b 100644
--- a/zebra-network/src/address_book/tests/prop.rs
+++ b/zebra-network/src/address_book/tests/prop.rs
@@ -9,7 +9,7 @@ use tracing::Span;
 use zebra_chain::{parameters::Network::*, serialization::Duration32};

 use crate::{
-    constants::{MAX_ADDRS_IN_ADDRESS_BOOK, MAX_PEER_ACTIVE_FOR_GOSSIP},
+    constants::{DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, MAX_PEER_ACTIVE_FOR_GOSSIP},
     meta_addr::{arbitrary::MAX_META_ADDR, MetaAddr, MetaAddrChange},
     AddressBook,
 };
@@ -30,6 +30,7 @@ proptest! {
     let address_book = AddressBook::new_with_addrs(
         local_listener,
         Mainnet,
+        DEFAULT_MAX_CONNS_PER_IP,
         MAX_ADDRS_IN_ADDRESS_BOOK,
         Span::none(),
         addresses
@@ -59,6 +60,7 @@ proptest!
{ let address_book = AddressBook::new_with_addrs( local_listener, Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::none(), addresses @@ -97,6 +99,7 @@ proptest! { let mut address_book = AddressBook::new_with_addrs( local_listener, Mainnet, + DEFAULT_MAX_CONNS_PER_IP, addr_limit, Span::none(), initial_addrs.clone(), @@ -119,6 +122,7 @@ proptest! { let mut address_book = AddressBook::new_with_addrs( local_listener, Mainnet, + DEFAULT_MAX_CONNS_PER_IP, addr_limit, Span::none(), initial_addrs, diff --git a/zebra-network/src/address_book/tests/vectors.rs b/zebra-network/src/address_book/tests/vectors.rs index 9896a358c85..e401e6a5de3 100644 --- a/zebra-network/src/address_book/tests/vectors.rs +++ b/zebra-network/src/address_book/tests/vectors.rs @@ -11,14 +11,21 @@ use zebra_chain::{ }; use crate::{ - constants::MAX_ADDRS_IN_ADDRESS_BOOK, meta_addr::MetaAddr, - protocol::external::types::PeerServices, AddressBook, + constants::{DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK}, + meta_addr::MetaAddr, + protocol::external::types::PeerServices, + AddressBook, }; /// Make sure an empty address book is actually empty. #[test] fn address_book_empty() { - let address_book = AddressBook::new("0.0.0.0:0".parse().unwrap(), Mainnet, Span::current()); + let address_book = AddressBook::new( + "0.0.0.0:0".parse().unwrap(), + Mainnet, + DEFAULT_MAX_CONNS_PER_IP, + Span::current(), + ); assert_eq!( address_book @@ -48,6 +55,7 @@ fn address_book_peer_order() { let address_book = AddressBook::new_with_addrs( "0.0.0.0:0".parse().unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::current(), addrs, @@ -64,6 +72,7 @@ fn address_book_peer_order() { let address_book = AddressBook::new_with_addrs( "0.0.0.0:0".parse().unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::current(), addrs, @@ -83,6 +92,7 @@ fn address_book_peer_order() { let address_book = AddressBook::new_with_addrs( "0.0.0.0:0".parse().unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::current(), addrs, @@ -99,6 +109,7 @@ fn address_book_peer_order() { let address_book = AddressBook::new_with_addrs( "0.0.0.0:0".parse().unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::current(), addrs, @@ -110,3 +121,64 @@ fn address_book_peer_order() { Some(meta_addr2), ); } + +/// Check that `reconnection_peers` skips addresses with IPs for which +/// Zebra already has recently updated outbound peers. 
+#[test] +fn reconnection_peers_skips_recently_updated_ip() { + // tests that reconnection_peers() skips addresses where there's a connection at that IP with a recent: + // - `last_response` + test_reconnection_peers_skips_recently_updated_ip(true, |addr| { + MetaAddr::new_responded(addr, &PeerServices::NODE_NETWORK) + }); + + // tests that reconnection_peers() *does not* skip addresses where there's a connection at that IP with a recent: + // - `last_attempt` + test_reconnection_peers_skips_recently_updated_ip(false, MetaAddr::new_reconnect); + // - `last_failure` + test_reconnection_peers_skips_recently_updated_ip(false, |addr| { + MetaAddr::new_errored(addr, PeerServices::NODE_NETWORK) + }); +} + +fn test_reconnection_peers_skips_recently_updated_ip< + M: Fn(crate::PeerSocketAddr) -> crate::meta_addr::MetaAddrChange, +>( + should_skip_ip: bool, + make_meta_addr_change: M, +) { + let addr1 = "127.0.0.1:1".parse().unwrap(); + let addr2 = "127.0.0.1:2".parse().unwrap(); + + let meta_addr1 = make_meta_addr_change(addr1).into_new_meta_addr( + Instant::now(), + Utc::now().try_into().expect("will succeed until 2038"), + ); + let meta_addr2 = MetaAddr::new_gossiped_meta_addr( + addr2, + PeerServices::NODE_NETWORK, + DateTime32::MIN.saturating_add(Duration32::from_seconds(1)), + ); + + // The second address should be skipped because the first address has a + // recent `last_response` time and the two addresses have the same IP. + let addrs = vec![meta_addr1, meta_addr2]; + let address_book = AddressBook::new_with_addrs( + "0.0.0.0:0".parse().unwrap(), + Mainnet, + DEFAULT_MAX_CONNS_PER_IP, + MAX_ADDRS_IN_ADDRESS_BOOK, + Span::current(), + addrs, + ); + + let next_reconnection_peer = address_book + .reconnection_peers(Instant::now(), Utc::now()) + .next(); + + if should_skip_ip { + assert_eq!(next_reconnection_peer, None,); + } else { + assert_ne!(next_reconnection_peer, None,); + } +} diff --git a/zebra-network/src/address_book_updater.rs b/zebra-network/src/address_book_updater.rs index d839ebbb1af..ef503bc8d82 100644 --- a/zebra-network/src/address_book_updater.rs +++ b/zebra-network/src/address_book_updater.rs @@ -51,6 +51,7 @@ impl AddressBookUpdater { let address_book = AddressBook::new( local_listener, config.network, + config.max_connections_per_ip, span!(Level::TRACE, "address book"), ); let address_metrics = address_book.address_metrics_watcher(); @@ -58,14 +59,12 @@ impl AddressBookUpdater { #[cfg(feature = "progress-bar")] let (mut address_info, address_bar, never_bar, failed_bar) = { - let address_bar = howudoin::new().label("Known Peers"); - - ( - address_metrics.clone(), - address_bar, - howudoin::new_with_parent(address_bar.id()).label("Never Attempted Peers"), - howudoin::new_with_parent(address_bar.id()).label("Failed Peers"), - ) + let address_bar = howudoin::new_root().label("Known Peers"); + let never_bar = + howudoin::new_with_parent(address_bar.id()).label("Never Attempted Peers"); + let failed_bar = howudoin::new_with_parent(never_bar.id()).label("Failed Peers"); + + (address_metrics.clone(), address_bar, never_bar, failed_bar) }; let worker_address_book = address_book.clone(); @@ -98,19 +97,17 @@ impl AddressBookUpdater { let address_info = *address_info.borrow_and_update(); address_bar - .set_pos(u64::try_from(address_info.num_addresses).expect("fits in u64")) - .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); + .set_pos(u64::try_from(address_info.num_addresses).expect("fits in u64")); + // 
.set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); let never_attempted = address_info.never_attempted_alternate + address_info.never_attempted_gossiped; - never_bar - .set_pos(u64::try_from(never_attempted).expect("fits in u64")) - .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); + never_bar.set_pos(u64::try_from(never_attempted).expect("fits in u64")); + // .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); - failed_bar - .set_pos(u64::try_from(address_info.failed).expect("fits in u64")) - .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); + failed_bar.set_pos(u64::try_from(address_info.failed).expect("fits in u64")); + // .set_len(u64::try_from(address_info.address_limit).expect("fits in u64")); } } diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs index 14f46ec4ff2..402ee6fc4cc 100644 --- a/zebra-network/src/config.rs +++ b/zebra-network/src/config.rs @@ -2,6 +2,8 @@ use std::{ collections::HashSet, + ffi::OsString, + io::{self, ErrorKind}, net::{IpAddr, SocketAddr}, string::String, time::Duration, @@ -9,21 +11,29 @@ use std::{ use indexmap::IndexSet; use serde::{de, Deserialize, Deserializer}; +use tempfile::NamedTempFile; +use tokio::{fs, io::AsyncWriteExt}; +use tracing::Span; use zebra_chain::parameters::Network; use crate::{ constants::{ - DEFAULT_CRAWL_NEW_PEER_INTERVAL, DNS_LOOKUP_TIMEOUT, INBOUND_PEER_LIMIT_MULTIPLIER, - OUTBOUND_PEER_LIMIT_MULTIPLIER, + DEFAULT_CRAWL_NEW_PEER_INTERVAL, DEFAULT_MAX_CONNS_PER_IP, + DEFAULT_PEERSET_INITIAL_TARGET_SIZE, DNS_LOOKUP_TIMEOUT, INBOUND_PEER_LIMIT_MULTIPLIER, + MAX_PEER_DISK_CACHE_SIZE, OUTBOUND_PEER_LIMIT_MULTIPLIER, }, protocol::external::{canonical_peer_addr, canonical_socket_addr}, BoxError, PeerSocketAddr, }; +mod cache_dir; + #[cfg(test)] mod tests; +pub use cache_dir::CacheDir; + /// The number of times Zebra will retry each initial peer's DNS resolution, /// before checking if any other initial peers have returned addresses. /// @@ -71,9 +81,64 @@ pub struct Config { /// testnet. pub initial_testnet_peers: IndexSet, + /// An optional root directory for storing cached peer address data. + /// + /// # Configuration + /// + /// Set to: + /// - `true` to read and write peer addresses to disk using the default cache path, + /// - `false` to disable reading and writing peer addresses to disk, + /// - `'/custom/cache/directory'` to read and write peer addresses to a custom directory. + /// + /// By default, all Zebra instances run by the same user will share a single peer cache. + /// If you use a custom cache path, you might also want to change `state.cache_dir`. + /// + /// # Functionality + /// + /// The peer cache is a list of the addresses of some recently useful peers. + /// + /// For privacy reasons, the cache does *not* include any other information about peers, + /// such as when they were connected to the node. + /// + /// Deleting or modifying the peer cache can impact your node's: + /// - reliability: if DNS or the Zcash DNS seeders are unavailable or broken + /// - security: if DNS is compromised with malicious peers + /// + /// If you delete it, Zebra will replace it with a fresh set of peers from the DNS seeders. 
+ /// + /// # Defaults + /// + /// The default directory is platform dependent, based on + /// [`dirs::cache_dir()`](https://docs.rs/dirs/3.0.1/dirs/fn.cache_dir.html): + /// + /// |Platform | Value | Example | + /// | ------- | ----------------------------------------------- | ------------------------------------ | + /// | Linux | `$XDG_CACHE_HOME/zebra` or `$HOME/.cache/zebra` | `/home/alice/.cache/zebra` | + /// | macOS | `$HOME/Library/Caches/zebra` | `/Users/Alice/Library/Caches/zebra` | + /// | Windows | `{FOLDERID_LocalAppData}\zebra` | `C:\Users\Alice\AppData\Local\zebra` | + /// | Other | `std::env::current_dir()/cache/zebra` | `/cache/zebra` | + /// + /// # Security + /// + /// If you are running Zebra with elevated permissions ("root"), create the + /// directory for this file before running Zebra, and make sure the Zebra user + /// account has exclusive access to that directory, and other users can't modify + /// its parent directories. + /// + /// # Implementation Details + /// + /// Each network has a separate peer list, which is updated regularly from the current + /// address book. These lists are stored in `network/mainnet.peers` and + /// `network/testnet.peers` files, underneath the `cache_dir` path. + /// + /// Previous peer lists are automatically loaded at startup, and used to populate the + /// initial peer set and address book. + pub cache_dir: CacheDir, + /// The initial target size for the peer set. /// - /// Also used to limit the number of inbound and outbound connections made by Zebra. + /// Also used to limit the number of inbound and outbound connections made by Zebra, + /// and the size of the cached peer list. /// /// If you have a slow network connection, and Zebra is having trouble /// syncing, try reducing the peer set size. You can also reduce the peer @@ -89,6 +154,26 @@ pub struct Config { /// next connection attempt. #[serde(with = "humantime_serde")] pub crawl_new_peer_interval: Duration, + + /// The maximum number of peer connections Zebra will keep for a given IP address + /// before it drops any additional peer connections with that IP. + /// + /// The default and minimum value are 1. + /// + /// # Security + /// + /// Increasing this config above 1 reduces Zebra's network security. + /// + /// If this config is greater than 1, Zebra can initiate multiple outbound handshakes to the same + /// IP address. + /// + /// This config does not currently limit the number of inbound connections that Zebra will accept + /// from the same IP address. + /// + /// If Zebra makes multiple inbound or outbound connections to the same IP, they will be dropped + /// after the handshake, but before adding them to the peer set. The total numbers of inbound and + /// outbound connections are also limited to a multiple of `peerset_initial_target_size`. + pub max_connections_per_ip: usize, } impl Config { @@ -144,9 +229,21 @@ impl Config { } } - /// Resolve initial seed peer IP addresses, based on the configured network. + /// Resolve initial seed peer IP addresses, based on the configured network, + /// and load cached peers from disk, if available. + /// + /// # Panics + /// + /// If a configured address is an invalid [`SocketAddr`] or DNS name. 
pub async fn initial_peers(&self) -> HashSet { - Config::resolve_peers(&self.initial_peer_hostnames().iter().cloned().collect()).await + // TODO: do DNS and disk in parallel if startup speed becomes important + let dns_peers = + Config::resolve_peers(&self.initial_peer_hostnames().iter().cloned().collect()).await; + + // Ignore disk errors because the cache is optional and the method already logs them. + let disk_peers = self.load_peer_cache().await.unwrap_or_default(); + + dns_peers.into_iter().chain(disk_peers).collect() } /// Concurrently resolves `peers` into zero or more IP addresses, with a @@ -161,6 +258,7 @@ impl Config { warn!( "no initial peers in the network config. \ Hint: you must configure at least one peer IP or DNS seeder to run Zebra, \ + give it some previously cached peer IP addresses on disk, \ or make sure Zebra's listener port gets inbound connections." ); return HashSet::new(); @@ -196,6 +294,10 @@ impl Config { /// `max_retries` times. /// /// If DNS continues to fail, returns an empty list of addresses. + /// + /// # Panics + /// + /// If a configured address is an invalid [`SocketAddr`] or DNS name. async fn resolve_host(host: &str, max_retries: usize) -> HashSet { for retries in 0..=max_retries { if let Ok(addresses) = Config::resolve_host_once(host).await { @@ -225,6 +327,10 @@ impl Config { /// /// If `host` is a DNS name, performs DNS resolution with a timeout of a few seconds. /// If DNS resolution fails or times out, returns an error. + /// + /// # Panics + /// + /// If a configured address is an invalid [`SocketAddr`] or DNS name. async fn resolve_host_once(host: &str) -> Result, BoxError> { let fut = tokio::net::lookup_host(host); let fut = tokio::time::timeout(DNS_LOOKUP_TIMEOUT, fut); @@ -233,11 +339,6 @@ impl Config { Ok(Ok(ip_addrs)) => { let ip_addrs: Vec = ip_addrs.map(canonical_peer_addr).collect(); - // if we're logging at debug level, - // the full list of IP addresses will be shown in the log message - let debug_span = debug_span!("", remote_ip_addrs = ?ip_addrs); - let _span_guard = debug_span.enter(); - // This log is needed for user debugging, but it's annoying during tests. #[cfg(not(test))] info!(seed = ?host, remote_ip_count = ?ip_addrs.len(), "resolved seed peer IP addresses"); @@ -260,6 +361,13 @@ impl Config { Ok(ip_addrs.into_iter().collect()) } + Ok(Err(e)) if e.kind() == ErrorKind::InvalidInput => { + // TODO: add testnet/mainnet ports, like we do with the listener address + panic!( + "Invalid peer IP address in Zebra config: addresses must have ports:\n\ + resolving {host:?} returned {e:?}" + ); + } Ok(Err(e)) => { tracing::info!(?host, ?e, "DNS error resolving peer IP addresses"); Err(e.into()) @@ -270,6 +378,196 @@ impl Config { } } } + + /// Returns the addresses in the peer list cache file, if available. + pub async fn load_peer_cache(&self) -> io::Result> { + let Some(peer_cache_file) = self.cache_dir.peer_cache_file_path(self.network) else { + return Ok(HashSet::new()); + }; + + let peer_list = match fs::read_to_string(&peer_cache_file).await { + Ok(peer_list) => peer_list, + Err(peer_list_error) => { + // We expect that the cache will be missing for new Zebra installs + if peer_list_error.kind() == ErrorKind::NotFound { + return Ok(HashSet::new()); + } else { + info!( + ?peer_list_error, + "could not load cached peer list, using default seed peers" + ); + return Err(peer_list_error); + } + } + }; + + // Skip and log addresses that don't parse, and automatically deduplicate using the HashSet. 
+        // (These issues shouldn't happen unless users modify the file.)
+        let peer_list: HashSet<PeerSocketAddr> = peer_list
+            .lines()
+            .filter_map(|peer| {
+                peer.parse()
+                    .map_err(|peer_parse_error| {
+                        info!(
+                            ?peer_parse_error,
+                            "invalid peer address in cached peer list, skipping"
+                        );
+                        peer_parse_error
+                    })
+                    .ok()
+            })
+            .collect();
+
+        // This log is needed for user debugging, but it's annoying during tests.
+        #[cfg(not(test))]
+        info!(
+            cached_ip_count = ?peer_list.len(),
+            ?peer_cache_file,
+            "loaded cached peer IP addresses"
+        );
+        #[cfg(test)]
+        debug!(
+            cached_ip_count = ?peer_list.len(),
+            ?peer_cache_file,
+            "loaded cached peer IP addresses"
+        );
+
+        for ip in &peer_list {
+            // Count each initial peer, recording the cache file and loaded IP address.
+            //
+            // If an IP is returned by both the DNS seeders and the cache,
+            // each duplicate adds 1 to the initial peer count.
+            // (But we only make one initial connection attempt to each IP.)
+            metrics::counter!(
+                "zcash.net.peers.initial",
+                1,
+                "cache" => peer_cache_file.display().to_string(),
+                "remote_ip" => ip.to_string()
+            );
+        }
+
+        Ok(peer_list)
+    }
+
+    /// Atomically writes a new `peer_list` to the peer list cache file, if configured.
+    /// If the list is empty, keeps the previous cache file.
+    ///
+    /// Also creates the peer cache directory, if it doesn't already exist.
+    ///
+    /// Atomic writes avoid corrupting the cache if Zebra panics or crashes, or if multiple Zebra
+    /// instances try to read and write the same cache file.
+    pub async fn update_peer_cache(&self, peer_list: HashSet<PeerSocketAddr>) -> io::Result<()> {
+        let Some(peer_cache_file) = self.cache_dir.peer_cache_file_path(self.network) else {
+            return Ok(());
+        };
+
+        if peer_list.is_empty() {
+            info!(
+                ?peer_cache_file,
+                "cacheable peer list was empty, keeping previous cache"
+            );
+            return Ok(());
+        }
+
+        // Turn IP addresses into strings
+        let mut peer_list: Vec<String> = peer_list
+            .iter()
+            .take(MAX_PEER_DISK_CACHE_SIZE)
+            .map(|redacted_peer| redacted_peer.remove_socket_addr_privacy().to_string())
+            .collect();
+        // # Privacy
+        //
+        // Sort to destroy any peer order, which could leak peer connection times.
+        // (Currently the HashSet argument does this as well.)
+        peer_list.sort();
+        // Make a newline-separated list
+        let peer_data = peer_list.join("\n");
+
+        // Write to a temporary file, so the cache is not corrupted if Zebra shuts down or crashes
+        // at the same time.
+        //
+        // # Concurrency
+        //
+        // We want to use async code to avoid blocking the tokio executor on filesystem operations,
+        // but `tempfile` is implemented using non-async methods. So we wrap its filesystem
+        // operations in `tokio::spawn_blocking()`.
+        //
+        // TODO: split this out into an atomic_write_to_tmp_file() method if we need to re-use it
+
+        // Create the peer cache directory if needed
+        let peer_cache_dir = peer_cache_file
+            .parent()
+            .expect("cache path always has a network directory")
+            .to_owned();
+        tokio::fs::create_dir_all(&peer_cache_dir).await?;
+
+        // Give the temporary file a similar name to the permanent cache file,
+        // but hide it in directory listings.
+        let mut tmp_peer_cache_prefix: OsString = ".tmp.".into();
+        tmp_peer_cache_prefix.push(
+            peer_cache_file
+                .file_name()
+                .expect("cache file always has a file name"),
+        );
+
+        // Create the temporary file.
+        // Do blocking filesystem operations on a dedicated thread.
+ let span = Span::current(); + let tmp_peer_cache_file = tokio::task::spawn_blocking(move || { + span.in_scope(move || { + // Put the temporary file in the same directory as the permanent file, + // so atomic filesystem operations are possible. + tempfile::Builder::new() + .prefix(&tmp_peer_cache_prefix) + .tempfile_in(peer_cache_dir) + }) + }) + .await + .expect("unexpected panic creating temporary peer cache file")?; + + // Write the list to the file asynchronously, by extracting the inner file, using it, + // then combining it back into a type that will correctly drop the file on error. + let (tmp_peer_cache_file, tmp_peer_cache_path) = tmp_peer_cache_file.into_parts(); + let mut tmp_peer_cache_file = tokio::fs::File::from_std(tmp_peer_cache_file); + tmp_peer_cache_file.write_all(peer_data.as_bytes()).await?; + + let tmp_peer_cache_file = + NamedTempFile::from_parts(tmp_peer_cache_file, tmp_peer_cache_path); + + // Atomically replace the current cache with the temporary cache. + // Do blocking filesystem operations on a dedicated thread. + let span = Span::current(); + tokio::task::spawn_blocking(move || { + span.in_scope(move || { + let result = tmp_peer_cache_file.persist(&peer_cache_file); + + // Drops the temp file if needed + match result { + Ok(_temp_file) => { + info!( + cached_ip_count = ?peer_list.len(), + ?peer_cache_file, + "updated cached peer IP addresses" + ); + + for ip in &peer_list { + metrics::counter!( + "zcash.net.peers.cache", + 1, + "cache" => peer_cache_file.display().to_string(), + "remote_ip" => ip.to_string() + ); + } + + Ok(()) + } + Err(error) => Err(error.error), + } + }) + }) + .await + .expect("unexpected panic making temporary peer cache file permanent") + } } impl Default for Config { @@ -300,6 +598,7 @@ impl Default for Config { network: Network::Mainnet, initial_mainnet_peers: mainnet_peers, initial_testnet_peers: testnet_peers, + cache_dir: CacheDir::default(), crawl_new_peer_interval: DEFAULT_CRAWL_NEW_PEER_INTERVAL, // # Security @@ -309,7 +608,8 @@ impl Default for Config { // // But Zebra should only make a small number of initial outbound connections, // so that idle peers don't use too many connection slots. 
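The patch repeatedly pairs `Span::current()` with `tokio::task::spawn_blocking()` so that log events emitted from the blocking thread stay attached to the caller's tracing span. A generic sketch of that helper pattern, assuming `tokio` and `tracing` as dependencies (the helper name is illustrative, not from the patch):

```rust
use tracing::{info_span, Span};

/// Run a blocking closure on tokio's blocking thread pool, keeping the
/// caller's tracing span active while it runs.
async fn blocking_with_span<T, F>(f: F) -> T
where
    F: FnOnce() -> T + Send + 'static,
    T: Send + 'static,
{
    // Capture the span on the async caller's thread...
    let span = Span::current();
    // ...and re-enter it on the blocking thread.
    tokio::task::spawn_blocking(move || span.in_scope(f))
        .await
        .expect("blocking task panicked")
}

#[tokio::main]
async fn main() {
    let _guard = info_span!("peer_cache").entered();

    // Blocking filesystem work would go inside the closure.
    let answer = blocking_with_span(|| 2 + 2).await;
    assert_eq!(answer, 4);
}
```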
-            peerset_initial_target_size: 25,
+            peerset_initial_target_size: DEFAULT_PEERSET_INITIAL_TARGET_SIZE,
+            max_connections_per_ip: DEFAULT_MAX_CONNS_PER_IP,
         }
     }
 }
@@ -326,9 +626,11 @@ impl<'de> Deserialize<'de> for Config {
             network: Network,
             initial_mainnet_peers: IndexSet<String>,
             initial_testnet_peers: IndexSet<String>,
+            cache_dir: CacheDir,
             peerset_initial_target_size: usize,
             #[serde(alias = "new_peer_interval", with = "humantime_serde")]
             crawl_new_peer_interval: Duration,
+            max_connections_per_ip: Option<usize>,
         }

         impl Default for DConfig {
@@ -339,32 +641,61 @@ impl<'de> Deserialize<'de> for Config {
                     network: config.network,
                     initial_mainnet_peers: config.initial_mainnet_peers,
                     initial_testnet_peers: config.initial_testnet_peers,
+                    cache_dir: config.cache_dir,
                     peerset_initial_target_size: config.peerset_initial_target_size,
                     crawl_new_peer_interval: config.crawl_new_peer_interval,
+                    max_connections_per_ip: Some(config.max_connections_per_ip),
                 }
             }
         }

-        let config = DConfig::deserialize(deserializer)?;
-
-        // TODO: perform listener DNS lookups asynchronously with a timeout (#1631)
-        let listen_addr = match config.listen_addr.parse::<SocketAddr>() {
+        let DConfig {
+            listen_addr,
+            network,
+            initial_mainnet_peers,
+            initial_testnet_peers,
+            cache_dir,
+            peerset_initial_target_size,
+            crawl_new_peer_interval,
+            max_connections_per_ip,
+        } = DConfig::deserialize(deserializer)?;
+
+        let listen_addr = match listen_addr.parse::<SocketAddr>() {
             Ok(socket) => Ok(socket),
-            Err(_) => match config.listen_addr.parse::<IpAddr>() {
-                Ok(ip) => Ok(SocketAddr::new(ip, config.network.default_port())),
+            Err(_) => match listen_addr.parse::<IpAddr>() {
+                Ok(ip) => Ok(SocketAddr::new(ip, network.default_port())),
                 Err(err) => Err(de::Error::custom(format!(
                     "{err}; Hint: addresses can be a IPv4, IPv6 (with brackets), or a DNS name, the port is optional"
                 ))),
             },
         }?;

+        let [max_connections_per_ip, peerset_initial_target_size] = [
+            ("max_connections_per_ip", max_connections_per_ip, DEFAULT_MAX_CONNS_PER_IP),
+            // If we want Zebra to operate with no network,
+            // we should implement a `zebrad` command that doesn't use `zebra-network`.
+            ("peerset_initial_target_size", Some(peerset_initial_target_size), DEFAULT_PEERSET_INITIAL_TARGET_SIZE)
+        ].map(|(field_name, non_zero_config_field, default_config_value)| {
+            if non_zero_config_field == Some(0) {
+                warn!(
+                    ?field_name,
+                    ?non_zero_config_field,
+                    "{field_name} should be greater than 0, using default value of {default_config_value} instead"
+                );
+            }
+
+            non_zero_config_field.filter(|config_value| config_value > &0).unwrap_or(default_config_value)
+        });
+
         Ok(Config {
             listen_addr: canonical_socket_addr(listen_addr),
-            network: config.network,
-            initial_mainnet_peers: config.initial_mainnet_peers,
-            initial_testnet_peers: config.initial_testnet_peers,
-            peerset_initial_target_size: config.peerset_initial_target_size,
-            crawl_new_peer_interval: config.crawl_new_peer_interval,
+            network,
+            initial_mainnet_peers,
+            initial_testnet_peers,
+            cache_dir,
+            peerset_initial_target_size,
+            crawl_new_peer_interval,
+            max_connections_per_ip,
         })
     }
 }
diff --git a/zebra-network/src/config/cache_dir.rs b/zebra-network/src/config/cache_dir.rs
new file mode 100644
index 00000000000..112ebe704ec
--- /dev/null
+++ b/zebra-network/src/config/cache_dir.rs
@@ -0,0 +1,74 @@
+//! Cache directory configuration for zebra-network.
+
+use std::path::{Path, PathBuf};
+
+use zebra_chain::parameters::Network;
+
+/// A cache directory config field.
+///
+/// This cache directory configuration field is optional.
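The deserializer above accepts either a full socket address or a bare IP, filling in the network's default port. A self-contained sketch of that fallback, with the default port passed as a plain `u16` instead of being read from Zebra's `Network` type:

```rust
use std::net::{IpAddr, SocketAddr};

/// Parse a listener config value as a full socket address, falling back
/// to a bare IP plus a default port.
fn parse_listen_addr(value: &str, default_port: u16) -> Result<SocketAddr, String> {
    value
        .parse::<SocketAddr>()
        .or_else(|_| {
            value
                .parse::<IpAddr>()
                .map(|ip| SocketAddr::new(ip, default_port))
        })
        .map_err(|err| {
            format!("{err}; Hint: addresses can be a IPv4, IPv6 (with brackets), or a DNS name, the port is optional")
        })
}

fn main() {
    // An explicit port wins over the default.
    let explicit = parse_listen_addr("127.0.0.1:9999", 8233).unwrap();
    assert_eq!(explicit.port(), 9999);

    // A bare IP gets the default port filled in.
    let bare = parse_listen_addr("2001:db8::1", 8233).unwrap();
    assert_eq!(bare.port(), 8233);

    assert!(parse_listen_addr("not-an-address", 8233).is_err());
}
```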
+/// It defaults to being enabled with the default config path,
+/// but also allows a custom path to be set.
+#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
+#[serde(untagged)]
+pub enum CacheDir {
+    /// Whether the cache directory is enabled with the default path (`true`),
+    /// or disabled (`false`).
+    IsEnabled(bool),
+
+    /// Enable the cache directory and use a custom path.
+    CustomPath(PathBuf),
+}
+
+impl CacheDir {
+    /// Returns a `CacheDir` enabled with the default path.
+    pub fn default_path() -> Self {
+        Self::IsEnabled(true)
+    }
+
+    /// Returns a disabled `CacheDir`.
+    pub fn disabled() -> Self {
+        Self::IsEnabled(false)
+    }
+
+    /// Returns a custom `CacheDir` enabled with `path`.
+    pub fn custom_path(path: impl AsRef<Path>) -> Self {
+        Self::CustomPath(path.as_ref().to_owned())
+    }
+
+    /// Returns `true` if this `CacheDir` is enabled with the default or a custom path.
+    pub fn is_enabled(&self) -> bool {
+        match self {
+            CacheDir::IsEnabled(is_enabled) => *is_enabled,
+            CacheDir::CustomPath(_) => true,
+        }
+    }
+
+    /// Returns the peer cache file path for `network`, if enabled.
+    pub fn peer_cache_file_path(&self, network: Network) -> Option<PathBuf> {
+        Some(
+            self.cache_dir()?
+                .join("network")
+                .join(format!("{}.peers", network.lowercase_name())),
+        )
+    }
+
+    /// Returns the `zebra-network` base cache directory, if enabled.
+    pub fn cache_dir(&self) -> Option<PathBuf> {
+        match self {
+            Self::IsEnabled(is_enabled) => is_enabled.then(|| {
+                dirs::cache_dir()
+                    .unwrap_or_else(|| std::env::current_dir().unwrap().join("cache"))
+                    .join("zebra")
+            }),
+
+            Self::CustomPath(cache_dir) => Some(cache_dir.to_owned()),
+        }
+    }
+}
+
+impl Default for CacheDir {
+    fn default() -> Self {
+        Self::default_path()
+    }
+}
diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs
index 67e73874fd9..32c1c477599 100644
--- a/zebra-network/src/constants.rs
+++ b/zebra-network/src/constants.rs
@@ -67,6 +67,23 @@ pub const INBOUND_PEER_LIMIT_MULTIPLIER: usize = 5;
 /// See [`INBOUND_PEER_LIMIT_MULTIPLIER`] for details.
 pub const OUTBOUND_PEER_LIMIT_MULTIPLIER: usize = 3;

+/// The default maximum number of peer connections Zebra will keep for a given IP address
+/// before it drops any additional peer connections with that IP.
+///
+/// This will be used as `Config.max_connections_per_ip` if no valid value is provided.
+///
+/// Note: Zebra will currently avoid initiating outbound connections where it
+/// has recently had a successful handshake with any address
+/// on that IP. Zebra will not initiate more than 1 outbound connection
+/// to an IP based on the default configuration, but it will accept more inbound
+/// connections to an IP.
+pub const DEFAULT_MAX_CONNS_PER_IP: usize = 1;
+
+/// The default peerset target size.
+///
+/// This will be used as `Config.peerset_initial_target_size` if no valid value is provided.
+pub const DEFAULT_PEERSET_INITIAL_TARGET_SIZE: usize = 25;
+
 /// The buffer size for the peer set.
 ///
 /// This should be greater than 1 to avoid sender contention, but also reasonably
@@ -83,13 +100,35 @@ pub const PEERSET_BUFFER_SIZE: usize = 3;
 /// and receiving a response from a remote peer.
 pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(20);

-/// The timeout for handshakes when connecting to new peers.
+/// The timeout for connections and handshakes when connecting to new peers.
+///
+/// Outbound TCP connections must complete within this timeout,
+/// then the handshake messages get an additional `HANDSHAKE_TIMEOUT` to complete.
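Because `CacheDir` is `#[serde(untagged)]`, a TOML `cache_dir` value of `true`, `false`, or a path string all deserialize cleanly. A cut-down demonstration of that behaviour, assuming `serde` with the `derive` feature and the `toml` crate for parsing:

```rust
use std::path::PathBuf;

use serde::Deserialize; // serde = { version = "1", features = ["derive"] }

/// The same untagged shape as the patch's `CacheDir`: a TOML boolean
/// picks `IsEnabled`, and a path string picks `CustomPath`.
#[derive(Debug, Deserialize, PartialEq)]
#[serde(untagged)]
enum CacheDir {
    IsEnabled(bool),
    CustomPath(PathBuf),
}

#[derive(Debug, Deserialize)]
struct NetworkSection {
    cache_dir: CacheDir,
}

fn main() {
    // toml = "0.7" (or later) is assumed here purely for the demonstration.
    let enabled: NetworkSection = toml::from_str("cache_dir = true").unwrap();
    assert_eq!(enabled.cache_dir, CacheDir::IsEnabled(true));

    let disabled: NetworkSection = toml::from_str("cache_dir = false").unwrap();
    assert_eq!(disabled.cache_dir, CacheDir::IsEnabled(false));

    let custom: NetworkSection =
        toml::from_str(r#"cache_dir = "/var/cache/zebra""#).unwrap();
    assert_eq!(
        custom.cache_dir,
        CacheDir::CustomPath(PathBuf::from("/var/cache/zebra"))
    );
}
```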
+/// (Inbound TCP accepts can't have a timeout, because they are handled by the OS.) /// /// This timeout should remain small, because it helps stop slow peers getting /// into the peer set. This is particularly important for network-constrained /// nodes, and on testnet. pub const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(3); +/// The maximum time difference for two address book changes to be considered concurrent. +/// +/// This prevents simultaneous or nearby important changes or connection progress +/// being overridden by less important changes. +/// +/// This timeout should be less than: +/// - the [peer reconnection delay](MIN_PEER_RECONNECTION_DELAY), and +/// - the [peer keepalive/heartbeat interval](HEARTBEAT_INTERVAL). +/// +/// But more than: +/// - the amount of time between connection events and address book updates, +/// even under heavy load (in tests, we have observed delays up to 500ms), +/// - the delay between an outbound connection failing, +/// and the [CandidateSet](crate::peer_set::CandidateSet) registering the failure, and +/// - the delay between the application closing a connection, +/// and any remaining positive changes from the peer. +pub const CONCURRENT_ADDRESS_CHANGE_PERIOD: Duration = Duration::from_secs(5); + /// We expect to receive a message from a live peer at least once in this time duration. /// /// This is the sum of: @@ -122,6 +161,22 @@ pub const INVENTORY_ROTATION_INTERVAL: Duration = Duration::from_secs(53); /// don't synchronise with other crawls. pub const DEFAULT_CRAWL_NEW_PEER_INTERVAL: Duration = Duration::from_secs(61); +/// The peer address disk cache update interval. +/// +/// This should be longer than [`DEFAULT_CRAWL_NEW_PEER_INTERVAL`], +/// but shorter than [`MAX_PEER_ACTIVE_FOR_GOSSIP`]. +/// +/// We use a short interval so Zebra instances which are restarted frequently +/// still have useful caches. +pub const PEER_DISK_CACHE_UPDATE_INTERVAL: Duration = Duration::from_secs(5 * 60); + +/// The maximum number of addresses in the peer disk cache. +/// +/// This is chosen to be less than the number of active peers, +/// and approximately the same as the number of seed peers returned by DNS. +/// It is a tradeoff between fingerprinting attacks, DNS pollution risk, and cache pollution risk. +pub const MAX_PEER_DISK_CACHE_SIZE: usize = 75; + /// The maximum duration since a peer was last seen to consider it reachable. /// /// This is used to prevent Zebra from gossiping addresses that are likely unreachable. Peers that @@ -298,6 +353,28 @@ pub const EWMA_DECAY_TIME_NANOS: f64 = 200.0 * NANOS_PER_SECOND; /// The number of nanoseconds in one second. const NANOS_PER_SECOND: f64 = 1_000_000_000.0; +/// The duration it takes for the drop probability of an overloaded connection to +/// reach [`MIN_OVERLOAD_DROP_PROBABILITY`]. +/// +/// Peer connections that receive multiple overloads have a higher probability of being dropped. +/// +/// The probability of a connection being dropped gradually decreases during this interval +/// until it reaches the default drop probability ([`MIN_OVERLOAD_DROP_PROBABILITY`]). +/// +/// Increasing this number increases the rate at which connections are dropped. +pub const OVERLOAD_PROTECTION_INTERVAL: Duration = MIN_INBOUND_PEER_CONNECTION_INTERVAL; + +/// The minimum probability of dropping a peer connection when it receives an +/// [`Overloaded`](crate::PeerError::Overloaded) error. 
+pub const MIN_OVERLOAD_DROP_PROBABILITY: f32 = 0.05; + +/// The maximum probability of dropping a peer connection when it receives an +/// [`Overloaded`](crate::PeerError::Overloaded) error. +pub const MAX_OVERLOAD_DROP_PROBABILITY: f32 = 0.5; + +/// The minimum interval between logging peer set status updates. +pub const MIN_PEER_SET_LOG_INTERVAL: Duration = Duration::from_secs(60); + lazy_static! { /// The minimum network protocol version accepted by this crate for each network, /// represented as a network upgrade. diff --git a/zebra-network/src/lib.rs b/zebra-network/src/lib.rs index c867c135d8c..97eafef656e 100644 --- a/zebra-network/src/lib.rs +++ b/zebra-network/src/lib.rs @@ -103,6 +103,7 @@ //! //! Peer Inventory Service: //! * tracks gossiped `inv` advertisements for each peer +//! * updated before each `PeerSet` request is processed //! * tracks missing inventory for each peer //! * used by the `PeerSet` to route block and transaction requests //! to peers that have the requested data @@ -112,10 +113,15 @@ //! [`AddressBook`] Service: //! * maintains a list of peer addresses and associated connection attempt metadata //! * address book metadata is used to prioritise peer connection attempts +//! * updated by an independent thread based on peer connection status changes +//! * caches peer addresses to disk regularly using an independent task //! //! Initial Seed Peer Task: -//! * initiates new outbound peer connections to seed peers, resolving them via DNS if required -//! * adds seed peer addresses to the [`AddressBook`] +//! On startup: +//! * loads seed peers from the config, resolving them via DNS if required +//! * loads cached peer addresses from disk +//! * initiates new outbound peer connections to seed and cached peers +//! * adds seed and cached peer addresses to the [`AddressBook`] //! //! Peer Crawler Task: //! * discovers new peer addresses by sending `Addr` requests to connected peers @@ -151,6 +157,7 @@ pub mod constants; mod isolated; mod meta_addr; mod peer; +mod peer_cache_updater; mod peer_set; mod policies; mod protocol; @@ -174,7 +181,7 @@ pub use crate::{ pub use crate::{ address_book::AddressBook, address_book_peers::AddressBookPeers, - config::Config, + config::{CacheDir, Config}, isolated::{connect_isolated, connect_isolated_tcp_direct}, meta_addr::{PeerAddrState, PeerSocketAddr}, peer::{Client, ConnectedAddr, ConnectionInfo, HandshakeError, PeerError, SharedPeerError}, diff --git a/zebra-network/src/meta_addr.rs b/zebra-network/src/meta_addr.rs index 3d9657fcb64..1f8572fd53c 100644 --- a/zebra-network/src/meta_addr.rs +++ b/zebra-network/src/meta_addr.rs @@ -1,7 +1,7 @@ //! An address-with-metadata type used in Bitcoin networking. use std::{ - cmp::{Ord, Ordering}, + cmp::{max, Ord, Ordering}, time::Instant, }; @@ -79,6 +79,38 @@ impl PeerAddrState { AttemptPending | Responded | Failed => false, } } + + /// Returns the typical connection state machine order of `self` and `other`. + /// Partially ordered states are sorted in connection attempt order. + /// + /// See [`MetaAddrChange::apply_to_meta_addr()`] for more details. + fn connection_state_order(&self, other: &Self) -> Ordering { + use Ordering::*; + match (self, other) { + _ if self == other => Equal, + // Peers start in one of the "never attempted" states, + // then typically progress towards a "responded" or "failed" state. + // + // # Security + // + // Prefer gossiped addresses to alternate addresses, + // so that peers can't replace the addresses of other peers. 
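The constants above only pin the endpoints: drops start at `MAX_OVERLOAD_DROP_PROBABILITY` right after an overload and settle back to `MIN_OVERLOAD_DROP_PROBABILITY` over `OVERLOAD_PROTECTION_INTERVAL`. The decay curve itself is not shown in this diff, so the linear interpolation below is only one plausible shape, and the one-second interval is a stand-in for `MIN_INBOUND_PEER_CONNECTION_INTERVAL`, which is defined elsewhere in the file:

```rust
use std::time::Duration;

// Endpoint constants from the patch; the interval value is a stand-in.
const MIN_OVERLOAD_DROP_PROBABILITY: f32 = 0.05;
const MAX_OVERLOAD_DROP_PROBABILITY: f32 = 0.5;
const OVERLOAD_PROTECTION_INTERVAL: Duration = Duration::from_secs(1);

/// One plausible decay shape (an assumption, not taken from the patch):
/// drop probability starts at the maximum immediately after an overload
/// and falls back to the minimum linearly over the protection interval.
fn drop_probability(since_last_overload: Duration) -> f32 {
    let elapsed_fraction = (since_last_overload.as_secs_f32()
        / OVERLOAD_PROTECTION_INTERVAL.as_secs_f32())
    .clamp(0.0, 1.0);

    MAX_OVERLOAD_DROP_PROBABILITY
        - elapsed_fraction * (MAX_OVERLOAD_DROP_PROBABILITY - MIN_OVERLOAD_DROP_PROBABILITY)
}

fn main() {
    // Immediately after an overload, half of overloaded peers are dropped.
    assert_eq!(
        drop_probability(Duration::ZERO),
        MAX_OVERLOAD_DROP_PROBABILITY
    );

    // Once the interval has fully elapsed, only the 5% baseline remains.
    let settled = drop_probability(Duration::from_secs(2));
    assert!((settled - MIN_OVERLOAD_DROP_PROBABILITY).abs() < 1e-6);
}
```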
+ // (This is currently checked explicitly by the address update code, + // but we respect the same order here as a precaution.) + (NeverAttemptedAlternate, _) => Less, + (_, NeverAttemptedAlternate) => Greater, + (NeverAttemptedGossiped, _) => Less, + (_, NeverAttemptedGossiped) => Greater, + (AttemptPending, _) => Less, + (_, AttemptPending) => Greater, + (Responded, _) => Less, + (_, Responded) => Greater, + // These patterns are redundant, but Rust doesn't assume that `==` is reflexive, + // so the first is still required (but unreachable). + (Failed, _) => Less, + //(_, Failed) => Greater, + } + } } // non-test code should explicitly specify the peer address state @@ -100,11 +132,7 @@ impl Ord for PeerAddrState { fn cmp(&self, other: &Self) -> Ordering { use Ordering::*; match (self, other) { - (Responded, Responded) - | (Failed, Failed) - | (NeverAttemptedGossiped, NeverAttemptedGossiped) - | (NeverAttemptedAlternate, NeverAttemptedAlternate) - | (AttemptPending, AttemptPending) => Equal, + _ if self == other => Equal, // We reconnect to `Responded` peers that have stopped sending messages, // then `NeverAttempted` peers, then `Failed` peers (Responded, _) => Less, @@ -115,7 +143,10 @@ impl Ord for PeerAddrState { (_, NeverAttemptedAlternate) => Greater, (Failed, _) => Less, (_, Failed) => Greater, - // AttemptPending is covered by the other cases + // These patterns are redundant, but Rust doesn't assume that `==` is reflexive, + // so the first is still required (but unreachable). + (AttemptPending, _) => Less, + //(_, AttemptPending) => Greater, } } } @@ -195,6 +226,9 @@ pub struct MetaAddr { #[derive(Copy, Clone, Debug, Eq, PartialEq)] #[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))] pub enum MetaAddrChange { + // TODO: + // - split the common `addr` field into an outer struct + // /// Creates a `MetaAddr` for an initial peer. NewInitial { #[cfg_attr( @@ -527,6 +561,17 @@ impl MetaAddr { } } + /// Returns true if any messages were recently sent to or received from this address. + pub fn was_recently_updated( + &self, + instant_now: Instant, + chrono_now: chrono::DateTime, + ) -> bool { + self.has_connection_recently_responded(chrono_now) + || self.was_connection_recently_attempted(instant_now) + || self.has_connection_recently_failed(instant_now) + } + /// Is this address ready for a new outbound connection attempt? pub fn is_ready_for_connection_attempt( &self, @@ -535,9 +580,7 @@ impl MetaAddr { network: Network, ) -> bool { self.last_known_info_is_valid_for_outbound(network) - && !self.has_connection_recently_responded(chrono_now) - && !self.was_connection_recently_attempted(instant_now) - && !self.has_connection_recently_failed(instant_now) + && !self.was_recently_updated(instant_now, chrono_now) && self.is_probably_reachable(chrono_now) } @@ -694,7 +737,7 @@ impl MetaAddrChange { } /// Return the untrusted last seen time for this change, if available. - pub fn untrusted_last_seen(&self) -> Option { + pub fn untrusted_last_seen(&self, now: DateTime32) -> Option { match self { NewInitial { .. } => None, NewGossiped { @@ -703,15 +746,34 @@ impl MetaAddrChange { } => Some(*untrusted_last_seen), NewAlternate { .. } => None, // We know that our local listener is available - NewLocal { .. } => Some(DateTime32::now()), + NewLocal { .. } => Some(now), UpdateAttempt { .. } => None, UpdateResponded { .. } => None, UpdateFailed { .. 
} => None, } } + // # Concurrency + // + // We assign a time to each change when it is applied to the address book by either the + // address book updater or candidate set tasks. This is the time that the change was received + // from the updater channel, rather than the time that the message was read from the peer + // connection. + // + // Since the connection tasks run concurrently in an unspecified order, and the address book + // updater runs in a separate thread, these times are almost always very similar. If Zebra's + // address book is under load, we should use lower rate-limits for new inbound or outbound + // connections, disconnections, peer gossip crawls, or peer `UpdateResponded` updates. + // + // TODO: + // - move the time API calls from `impl MetaAddrChange` `last_*()` methods: + // - if they impact performance, call them once in the address book updater task, + // then apply them to all the waiting changes + // - otherwise, move them to the `impl MetaAddrChange` `new_*()` methods, + // so they are called in the connection tasks + // /// Return the last attempt for this change, if available. - pub fn last_attempt(&self) -> Option { + pub fn last_attempt(&self, now: Instant) -> Option { match self { NewInitial { .. } => None, NewGossiped { .. } => None, @@ -720,14 +782,14 @@ impl MetaAddrChange { // Attempt changes are applied before we start the handshake to the // peer address. So the attempt time is a lower bound for the actual // handshake time. - UpdateAttempt { .. } => Some(Instant::now()), + UpdateAttempt { .. } => Some(now), UpdateResponded { .. } => None, UpdateFailed { .. } => None, } } /// Return the last response for this change, if available. - pub fn last_response(&self) -> Option { + pub fn last_response(&self, now: DateTime32) -> Option { match self { NewInitial { .. } => None, NewGossiped { .. } => None, @@ -739,13 +801,13 @@ impl MetaAddrChange { // - we might send outdated last seen times to our peers, and // - the peer will appear to be live for longer, delaying future // reconnection attempts. - UpdateResponded { .. } => Some(DateTime32::now()), + UpdateResponded { .. } => Some(now), UpdateFailed { .. } => None, } } /// Return the last failure for this change, if available. - pub fn last_failure(&self) -> Option { + pub fn last_failure(&self, now: Instant) -> Option { match self { NewInitial { .. } => None, NewGossiped { .. } => None, @@ -758,7 +820,7 @@ impl MetaAddrChange { // states for longer, and // - the peer will appear to be used for longer, delaying future // reconnection attempts. - UpdateFailed { .. } => Some(Instant::now()), + UpdateFailed { .. } => Some(now), } } @@ -776,93 +838,212 @@ impl MetaAddrChange { } } - /// If this change can create a new `MetaAddr`, return that address. - pub fn into_new_meta_addr(self) -> Option { - Some(MetaAddr { + /// Returns the corresponding `MetaAddr` for this change. 
+ pub fn into_new_meta_addr(self, instant_now: Instant, local_now: DateTime32) -> MetaAddr { + MetaAddr { addr: self.addr(), services: self.untrusted_services(), - untrusted_last_seen: self.untrusted_last_seen(), - last_response: self.last_response(), - last_attempt: self.last_attempt(), - last_failure: self.last_failure(), + untrusted_last_seen: self.untrusted_last_seen(local_now), + last_response: self.last_response(local_now), + last_attempt: self.last_attempt(instant_now), + last_failure: self.last_failure(instant_now), last_connection_state: self.peer_addr_state(), - }) + } + } + + /// Returns the corresponding [`MetaAddr`] for a local listener change. + /// + /// This method exists so we don't have to provide an unused [`Instant`] to get a local + /// listener `MetaAddr`. + /// + /// # Panics + /// + /// If this change is not a [`MetaAddrChange::NewLocal`]. + pub fn local_listener_into_new_meta_addr(self, local_now: DateTime32) -> MetaAddr { + assert!(matches!(self, MetaAddrChange::NewLocal { .. })); + + MetaAddr { + addr: self.addr(), + services: self.untrusted_services(), + untrusted_last_seen: self.untrusted_last_seen(local_now), + last_response: self.last_response(local_now), + last_attempt: None, + last_failure: None, + last_connection_state: self.peer_addr_state(), + } } /// Apply this change to a previous `MetaAddr` from the address book, /// producing a new or updated `MetaAddr`. /// /// If the change isn't valid for the `previous` address, returns `None`. - pub fn apply_to_meta_addr(&self, previous: impl Into>) -> Option { - if let Some(previous) = previous.into() { - assert_eq!(previous.addr, self.addr(), "unexpected addr mismatch"); - - let previous_has_been_attempted = !previous.last_connection_state.is_never_attempted(); - let change_to_never_attempted = self - .into_new_meta_addr() - .map(|meta_addr| meta_addr.last_connection_state.is_never_attempted()) - .unwrap_or(false); - - if change_to_never_attempted { - if previous_has_been_attempted { - // Existing entry has been attempted, change is NeverAttempted - // - ignore the change - // - // # Security - // - // Ignore NeverAttempted changes once we have made an attempt, - // so malicious peers can't keep changing our peer connection order. - None - } else { - // Existing entry and change are both NeverAttempted - // - preserve original values of all fields - // - but replace None with Some - // - // # Security - // - // Preserve the original field values for NeverAttempted peers, - // so malicious peers can't keep changing our peer connection order. - Some(MetaAddr { - addr: self.addr(), - services: previous.services.or_else(|| self.untrusted_services()), - untrusted_last_seen: previous - .untrusted_last_seen - .or_else(|| self.untrusted_last_seen()), - // The peer has not been attempted, so these fields must be None - last_response: None, - last_attempt: None, - last_failure: None, - last_connection_state: self.peer_addr_state(), - }) - } - } else { - // Existing entry and change are both Attempt, Responded, Failed - // - ignore changes to earlier times - // - update the services from the change - // - // # Security - // - // Ignore changes to earlier times. This enforces the peer - // connection timeout, even if changes are applied out of order. - Some(MetaAddr { - addr: self.addr(), - // We want up-to-date services, even if they have fewer bits, - // or they are applied out of order. 
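A recurring change in this diff is that `last_attempt()`, `last_response()`, and `last_failure()` now take the current time as a parameter instead of calling `Instant::now()` or `DateTime32::now()` internally. A cut-down sketch of why (the enum here is an illustrative stand-in, not Zebra's `MetaAddrChange`): the caller reads the clock once and applies it to every pending change, which keeps concurrent changes comparable and makes tests deterministic:

```rust
use std::time::Instant;

/// A stand-in change type showing the time-injection pattern:
/// `now` comes from the caller, not from inside the method.
#[derive(Clone, Copy, Debug)]
enum Change {
    UpdateAttempt,
    UpdateResponded,
}

impl Change {
    /// Return the last attempt time for this change, if available.
    fn last_attempt(&self, now: Instant) -> Option<Instant> {
        match self {
            Change::UpdateAttempt => Some(now),
            Change::UpdateResponded => None,
        }
    }
}

fn main() {
    // Read the clock once, then apply it to every pending change, so the
    // changes get consistent timestamps regardless of processing order.
    let now = Instant::now();
    let changes = [Change::UpdateAttempt, Change::UpdateResponded];

    let attempts: Vec<Option<Instant>> = changes
        .iter()
        .map(|change| change.last_attempt(now))
        .collect();

    assert_eq!(attempts, vec![Some(now), None]);
}
```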
- services: self.untrusted_services().or(previous.services), - // Only NeverAttempted changes can modify the last seen field - untrusted_last_seen: previous.untrusted_last_seen, - // Since Some(time) is always greater than None, `max` prefers: - // - the latest time if both are Some - // - Some(time) if the other is None - last_response: self.last_response().max(previous.last_response), - last_attempt: self.last_attempt().max(previous.last_attempt), - last_failure: self.last_failure().max(previous.last_failure), - last_connection_state: self.peer_addr_state(), + #[allow(clippy::unwrap_in_result)] + pub fn apply_to_meta_addr( + &self, + previous: impl Into>, + instant_now: Instant, + chrono_now: chrono::DateTime, + ) -> Option { + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + + let Some(previous) = previous.into() else { + // no previous: create a new entry + return Some(self.into_new_meta_addr(instant_now, local_now)); + }; + + assert_eq!(previous.addr, self.addr(), "unexpected addr mismatch"); + + let instant_previous = max(previous.last_attempt, previous.last_failure); + let local_previous = previous.last_response; + + // Is this change potentially concurrent with the previous change? + // + // Since we're using saturating arithmetic, one of each pair of less than comparisons + // will always be true, because subtraction saturates to zero. + let change_is_concurrent = instant_previous + .map(|instant_previous| { + instant_previous.saturating_duration_since(instant_now) + < constants::CONCURRENT_ADDRESS_CHANGE_PERIOD + && instant_now.saturating_duration_since(instant_previous) + < constants::CONCURRENT_ADDRESS_CHANGE_PERIOD + }) + .unwrap_or_default() + || local_previous + .map(|local_previous| { + local_previous.saturating_duration_since(local_now).to_std() + < constants::CONCURRENT_ADDRESS_CHANGE_PERIOD + && local_now.saturating_duration_since(local_previous).to_std() + < constants::CONCURRENT_ADDRESS_CHANGE_PERIOD }) - } + .unwrap_or_default(); + let change_is_out_of_order = instant_previous + .map(|instant_previous| instant_previous > instant_now) + .unwrap_or_default() + || local_previous + .map(|local_previous| local_previous > local_now) + .unwrap_or_default(); + + // Is this change typically from a connection state that has more progress? + let connection_has_more_progress = self + .peer_addr_state() + .connection_state_order(&previous.last_connection_state) + == Ordering::Greater; + + let previous_has_been_attempted = !previous.last_connection_state.is_never_attempted(); + let change_to_never_attempted = self.peer_addr_state().is_never_attempted(); + + // Invalid changes + + if change_to_never_attempted && previous_has_been_attempted { + // Existing entry has been attempted, change is NeverAttempted + // - ignore the change + // + // # Security + // + // Ignore NeverAttempted changes once we have made an attempt, + // so malicious peers can't keep changing our peer connection order. + return None; + } + + if change_is_out_of_order && !change_is_concurrent { + // Change is significantly out of order: ignore it. + // + // # Security + // + // Ignore changes that arrive out of order, if they are far enough apart. + // This enforces the peer connection retry interval. + return None; + } + + if change_is_concurrent && !connection_has_more_progress { + // Change is close together in time, and it would revert the connection to an earlier + // state. 
+ // + // # Security + // + // If the changes might have been concurrent, ignore connection states with less + // progress. + // + // ## Sources of Concurrency + // + // If two changes happen close together, the async scheduler can run their change + // send and apply code in any order. This includes the code that records the time of + // the change. So even if a failure happens after a response message, the failure time + // can be recorded before the response time code is run. + // + // Some machines and OSes have limited time resolution, so we can't guarantee that + // two messages on the same connection will always have different times. There are + // also known bugs impacting monotonic times which make them go backwards or stay + // equal. For wall clock times, clock skew is an expected event, particularly with + // network time server updates. + // + // Also, the application can fail a connection independently and simultaneously + // (or slightly before) a positive update from that peer connection. We want the + // application change to take priority in the address book, because the connection + // state machine also prioritises failures over any other peer messages. + // + // ## Resolution + // + // In these cases, we want to apply the failure, then ignore any nearby changes that + // reset the address book entry to a more appealing state. This prevents peers from + // sending updates right before failing a connection, in order to make themselves more + // likely to get a reconnection. + // + // The connection state machine order is used so that state transitions which are + // typically close together are preserved. These transitions are: + // - NeverAttempted*->AttemptPending->(Responded|Failed) + // - Responded->Failed + // + // State transitions like (Responded|Failed)->AttemptPending only happen after the + // reconnection timeout, so they will never be considered concurrent. + return None; + } + + // Valid changes + + if change_to_never_attempted && !previous_has_been_attempted { + // Existing entry and change are both NeverAttempted + // - preserve original values of all fields + // - but replace None with Some + // + // # Security + // + // Preserve the original field values for NeverAttempted peers, + // so malicious peers can't keep changing our peer connection order. + Some(MetaAddr { + addr: self.addr(), + services: previous.services.or_else(|| self.untrusted_services()), + untrusted_last_seen: previous + .untrusted_last_seen + .or_else(|| self.untrusted_last_seen(local_now)), + // The peer has not been attempted, so these fields must be None + last_response: None, + last_attempt: None, + last_failure: None, + last_connection_state: self.peer_addr_state(), + }) } else { - // no previous: create a new entry - self.into_new_meta_addr() + // Existing entry and change are both Attempt, Responded, Failed, + // and the change is later, either in time or in connection progress + // (this is checked above and returns None early): + // - update the fields from the change + Some(MetaAddr { + addr: self.addr(), + // Always update optional fields, unless the update is None. + // + // We want up-to-date services, even if they have fewer bits + services: self.untrusted_services().or(previous.services), + // Only NeverAttempted changes can modify the last seen field + untrusted_last_seen: previous.untrusted_last_seen, + // This is a wall clock time, but we already checked that responses are in order. + // Even if the wall clock time has jumped, we want to use the latest time. 
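The concurrency check above treats two timestamps as concurrent when each saturating difference is under `CONCURRENT_ADDRESS_CHANGE_PERIOD`; because subtraction saturates to zero, one comparison in each pair is always trivially true, the check never panics, and it doesn't matter which event came first. Distilled into a standalone function over `std::time::Instant`:

```rust
use std::time::{Duration, Instant};

const CONCURRENT_ADDRESS_CHANGE_PERIOD: Duration = Duration::from_secs(5);

/// Two timestamps are concurrent when they fall within the change period
/// of each other, checked symmetrically with saturating subtraction.
fn is_concurrent(a: Instant, b: Instant) -> bool {
    a.saturating_duration_since(b) < CONCURRENT_ADDRESS_CHANGE_PERIOD
        && b.saturating_duration_since(a) < CONCURRENT_ADDRESS_CHANGE_PERIOD
}

fn main() {
    let start = Instant::now();
    let close = start + Duration::from_secs(2);
    let far = start + Duration::from_secs(30);

    assert!(is_concurrent(start, close));
    assert!(is_concurrent(close, start)); // order doesn't matter
    assert!(!is_concurrent(start, far));
}
```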
+ last_response: self.last_response(local_now).or(previous.last_response), + // These are monotonic times, we already checked the responses are in order. + last_attempt: self.last_attempt(instant_now).or(previous.last_attempt), + last_failure: self.last_failure(instant_now).or(previous.last_failure), + // Replace the state with the updated state. + last_connection_state: self.peer_addr_state(), + }) } } } diff --git a/zebra-network/src/meta_addr/arbitrary.rs b/zebra-network/src/meta_addr/arbitrary.rs index 955607e775d..d7d4dd840e8 100644 --- a/zebra-network/src/meta_addr/arbitrary.rs +++ b/zebra-network/src/meta_addr/arbitrary.rs @@ -1,5 +1,7 @@ //! Randomised test data generation for MetaAddr. +use std::{net::IpAddr, time::Instant}; + use proptest::{arbitrary::any, collection::vec, prelude::*}; use zebra_chain::{parameters::Network::*, serialization::DateTime32}; @@ -49,12 +51,20 @@ impl MetaAddr { /// /// [1]: super::PeerAddrState::NeverAttemptedAlternate pub fn alternate_strategy() -> BoxedStrategy { - (canonical_peer_addr_strategy(), any::()) - .prop_map(|(socket_addr, untrusted_services)| { - MetaAddr::new_alternate(socket_addr, &untrusted_services) - .into_new_meta_addr() - .expect("unexpected invalid alternate change") - }) + ( + canonical_peer_addr_strategy(), + any::(), + any::(), + any::(), + ) + .prop_map( + |(socket_addr, untrusted_services, instant_now, local_now)| { + // instant_now is not actually used for this variant, + // so we could just provide a default value + MetaAddr::new_alternate(socket_addr, &untrusted_services) + .into_new_meta_addr(instant_now, local_now) + }, + ) .boxed() } } @@ -89,7 +99,17 @@ impl MetaAddrChange { .boxed() } - /// Create a strategy that generates [`MetaAddrChange`]s which are ready for + /// Create a strategy that generates [`IpAddr`]s for [`MetaAddrChange`]s which are ready for + /// outbound connections. + pub fn ready_outbound_strategy_ip() -> BoxedStrategy { + any::() + .prop_filter("failed MetaAddr::is_valid_for_outbound", |ip| { + !ip.is_unspecified() + }) + .boxed() + } + + /// Create a strategy that generates port numbers for [`MetaAddrChange`]s which are ready for /// outbound connections. /// /// Currently, all generated changes are the [`NewAlternate`][1] variant. @@ -97,23 +117,30 @@ impl MetaAddrChange { /// fields. (After PR #2276 merges.) 
/// /// [1]: super::NewAlternate - pub fn ready_outbound_strategy() -> BoxedStrategy { - canonical_peer_addr_strategy() - .prop_filter_map("failed MetaAddr::is_valid_for_outbound", |addr| { - // Alternate nodes use the current time, so they're always ready - // - // TODO: create a "Zebra supported services" constant - let change = MetaAddr::new_alternate(addr, &PeerServices::NODE_NETWORK); - if change - .into_new_meta_addr() - .expect("unexpected invalid alternate change") - .last_known_info_is_valid_for_outbound(Mainnet) - { - Some(change) - } else { - None - } - }) + pub fn ready_outbound_strategy_port() -> BoxedStrategy { + ( + canonical_peer_addr_strategy(), + any::(), + any::(), + ) + .prop_filter_map( + "failed MetaAddr::is_valid_for_outbound", + |(addr, instant_now, local_now)| { + // Alternate nodes use the current time, so they're always ready + // + // TODO: create a "Zebra supported services" constant + + let change = MetaAddr::new_alternate(addr, &PeerServices::NODE_NETWORK); + if change + .into_new_meta_addr(instant_now, local_now) + .last_known_info_is_valid_for_outbound(Mainnet) + { + Some(addr.port()) + } else { + None + } + }, + ) .boxed() } } diff --git a/zebra-network/src/meta_addr/peer_addr.rs b/zebra-network/src/meta_addr/peer_addr.rs index 09876a080a7..92a27defcca 100644 --- a/zebra-network/src/meta_addr/peer_addr.rs +++ b/zebra-network/src/meta_addr/peer_addr.rs @@ -3,7 +3,7 @@ use std::{ fmt, - net::SocketAddr, + net::{Ipv4Addr, SocketAddr}, ops::{Deref, DerefMut}, str::FromStr, }; @@ -64,3 +64,16 @@ impl DerefMut for PeerSocketAddr { &mut self.0 } } + +impl PeerSocketAddr { + /// Returns an unspecified `PeerSocketAddr`, which can't be used for outbound connections. + pub fn unspecified() -> Self { + Self(SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0)) + } + + /// Return the underlying [`SocketAddr`], which allows sensitive peer address information to + /// be printed and logged. + pub fn remove_socket_addr_privacy(&self) -> SocketAddr { + **self + } +} diff --git a/zebra-network/src/meta_addr/tests/prop.rs b/zebra-network/src/meta_addr/tests/prop.rs index 0b5f968aebd..dfd497b3d22 100644 --- a/zebra-network/src/meta_addr/tests/prop.rs +++ b/zebra-network/src/meta_addr/tests/prop.rs @@ -4,14 +4,16 @@ use std::{collections::HashMap, env, net::SocketAddr, str::FromStr, sync::Arc, t use chrono::Utc; use proptest::{collection::vec, prelude::*}; -use tokio::time::Instant; use tower::service_fn; use tracing::Span; use zebra_chain::{parameters::Network::*, serialization::DateTime32}; use crate::{ - constants::{MAX_ADDRS_IN_ADDRESS_BOOK, MAX_RECENT_PEER_AGE, MIN_PEER_RECONNECTION_DELAY}, + constants::{ + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, MAX_RECENT_PEER_AGE, + MIN_PEER_RECONNECTION_DELAY, + }, meta_addr::{ arbitrary::{MAX_ADDR_CHANGE, MAX_META_ADDR}, MetaAddr, MetaAddrChange, @@ -64,8 +66,12 @@ proptest! { ) { let _init_guard = zebra_test::init(); + let instant_now = std::time::Instant::now(); + let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + for change in changes { - if let Some(changed_addr) = change.apply_to_meta_addr(addr) { + if let Some(changed_addr) = change.apply_to_meta_addr(addr, instant_now, chrono_now) { // untrusted last seen times: // check that we replace None with Some, but leave Some unchanged if addr.untrusted_last_seen.is_some() { @@ -73,7 +79,7 @@ proptest! 
{ } else { prop_assert_eq!( changed_addr.untrusted_last_seen, - change.untrusted_last_seen() + change.untrusted_last_seen(local_now) ); } @@ -112,18 +118,22 @@ proptest! { for change in changes { while addr.is_ready_for_connection_attempt(instant_now, chrono_now, Mainnet) { - attempt_count += 1; - // Assume that this test doesn't last longer than MIN_PEER_RECONNECTION_DELAY - prop_assert!(attempt_count <= 1); - // Simulate an attempt - addr = MetaAddr::new_reconnect(addr.addr) - .apply_to_meta_addr(addr) - .expect("unexpected invalid attempt"); + addr = if let Some(addr) = MetaAddr::new_reconnect(addr.addr) + .apply_to_meta_addr(addr, instant_now, chrono_now) { + attempt_count += 1; + // Assume that this test doesn't last longer than MIN_PEER_RECONNECTION_DELAY + prop_assert!(attempt_count <= 1); + addr + } else { + // Stop updating when an attempt comes too soon after a failure. + // In production these are prevented by the dialer code. + break; + } } // If `change` is invalid for the current MetaAddr state, skip it. - if let Some(changed_addr) = change.apply_to_meta_addr(addr) { + if let Some(changed_addr) = change.apply_to_meta_addr(addr, instant_now, chrono_now) { prop_assert_eq!(changed_addr.addr, addr.addr); addr = changed_addr; } @@ -149,13 +159,14 @@ proptest! { let address_book = AddressBook::new_with_addrs( local_listener, Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::none(), address_book_addrs ); let sanitized_addrs = address_book.sanitized(chrono_now); - let expected_local_listener = address_book.local_listener_meta_addr(); + let expected_local_listener = address_book.local_listener_meta_addr(chrono_now); let canonical_local_listener = canonical_peer_addr(local_listener); let book_sanitized_local_listener = sanitized_addrs .iter() @@ -186,9 +197,12 @@ proptest! { let local_listener = "0.0.0.0:0".parse().expect("unexpected invalid SocketAddr"); + let instant_now = std::time::Instant::now(); + let chrono_now = Utc::now(); + for change in changes { // Check direct application - let new_addr = change.apply_to_meta_addr(None); + let new_addr = change.apply_to_meta_addr(None, instant_now, chrono_now); prop_assert!( new_addr.is_some(), @@ -204,6 +218,7 @@ proptest! { let mut address_book = AddressBook::new_with_addrs( local_listener, Mainnet, + DEFAULT_MAX_CONNS_PER_IP, 1, Span::none(), Vec::new(), @@ -317,6 +332,7 @@ proptest! { let address_book = Arc::new(std::sync::Mutex::new(AddressBook::new_with_addrs( SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, MAX_ADDRS_IN_ADDRESS_BOOK, Span::none(), addrs, @@ -328,7 +344,7 @@ proptest! { tokio::time::pause(); // The earliest time we can have a valid next attempt for this peer - let earliest_next_attempt = Instant::now() + MIN_PEER_RECONNECTION_DELAY; + let earliest_next_attempt = tokio::time::Instant::now() + MIN_PEER_RECONNECTION_DELAY; // The number of attempts for this peer in the last MIN_PEER_RECONNECTION_DELAY let mut attempt_count: usize = 0; @@ -349,7 +365,7 @@ proptest! { original addr was in address book: {}\n", candidate_addr, i, - Instant::now(), + tokio::time::Instant::now(), earliest_next_attempt, attempt_count, LIVE_PEER_INTERVALS, @@ -365,7 +381,7 @@ proptest! { address_book.clone().lock().unwrap().update(change); tokio::time::advance(peer_change_interval).await; - if Instant::now() >= earliest_next_attempt { + if tokio::time::Instant::now() >= earliest_next_attempt { attempt_count = 0; } } @@ -423,20 +439,24 @@ proptest! 
{ let change = changes.get(change_index); while addr.is_ready_for_connection_attempt(instant_now, chrono_now, Mainnet) { - *attempt_counts.entry(addr.addr).or_default() += 1; - prop_assert!( - *attempt_counts.get(&addr.addr).unwrap() <= LIVE_PEER_INTERVALS + 1 - ); - // Simulate an attempt - *addr = MetaAddr::new_reconnect(addr.addr) - .apply_to_meta_addr(*addr) - .expect("unexpected invalid attempt"); + *addr = if let Some(addr) = MetaAddr::new_reconnect(addr.addr) + .apply_to_meta_addr(*addr, instant_now, chrono_now) { + *attempt_counts.entry(addr.addr).or_default() += 1; + prop_assert!( + *attempt_counts.get(&addr.addr).unwrap() <= LIVE_PEER_INTERVALS + 1 + ); + addr + } else { + // Stop updating when an attempt comes too soon after a failure. + // In production these are prevented by the dialer code. + break; + } } // If `change` is invalid for the current MetaAddr state, skip it. // If we've run out of changes for this addr, do nothing. - if let Some(changed_addr) = change.and_then(|change| change.apply_to_meta_addr(*addr)) + if let Some(changed_addr) = change.and_then(|change| change.apply_to_meta_addr(*addr, instant_now, chrono_now)) { prop_assert_eq!(changed_addr.addr, addr.addr); *addr = changed_addr; diff --git a/zebra-network/src/meta_addr/tests/vectors.rs b/zebra-network/src/meta_addr/tests/vectors.rs index 187f70778a2..5b341901b18 100644 --- a/zebra-network/src/meta_addr/tests/vectors.rs +++ b/zebra-network/src/meta_addr/tests/vectors.rs @@ -1,5 +1,7 @@ //! Fixed test cases for MetaAddr and MetaAddrChange. +use std::time::Instant; + use chrono::Utc; use zebra_chain::{ @@ -7,7 +9,11 @@ use zebra_chain::{ serialization::{DateTime32, Duration32}, }; -use crate::{constants::MAX_PEER_ACTIVE_FOR_GOSSIP, protocol::types::PeerServices, PeerSocketAddr}; +use crate::{ + constants::{CONCURRENT_ADDRESS_CHANGE_PERIOD, MAX_PEER_ACTIVE_FOR_GOSSIP}, + protocol::types::PeerServices, + PeerSocketAddr, +}; use super::{super::MetaAddr, check}; @@ -57,12 +63,13 @@ fn sanitize_extremes() { fn new_local_listener_is_gossipable() { let _init_guard = zebra_test::init(); + let instant_now = Instant::now(); let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); - let peer = MetaAddr::new_local_listener_change(address) - .into_new_meta_addr() - .expect("MetaAddrChange can't create a new MetaAddr"); + let peer = + MetaAddr::new_local_listener_change(address).into_new_meta_addr(instant_now, local_now); assert!(peer.is_active_for_gossip(chrono_now)); } @@ -75,12 +82,13 @@ fn new_local_listener_is_gossipable() { fn new_alternate_peer_address_is_not_gossipable() { let _init_guard = zebra_test::init(); + let instant_now = Instant::now(); let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); let peer = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) - .into_new_meta_addr() - .expect("MetaAddrChange can't create a new MetaAddr"); + .into_new_meta_addr(instant_now, local_now); assert!(!peer.is_active_for_gossip(chrono_now)); } @@ -153,16 +161,17 @@ fn gossiped_peer_reportedly_seen_long_ago_is_not_gossipable() { fn recently_responded_peer_is_gossipable() { let _init_guard = zebra_test::init(); + let instant_now = Instant::now(); let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); let 
address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) - .into_new_meta_addr() - .expect("MetaAddrChange can't create a new MetaAddr"); + .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) - .apply_to_meta_addr(peer_seed) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); assert!(peer.is_active_for_gossip(chrono_now)); @@ -173,16 +182,17 @@ fn recently_responded_peer_is_gossipable() { fn not_so_recently_responded_peer_is_still_gossipable() { let _init_guard = zebra_test::init(); + let instant_now = Instant::now(); let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) - .into_new_meta_addr() - .expect("MetaAddrChange can't create a new MetaAddr"); + .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded let mut peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) - .apply_to_meta_addr(peer_seed) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); // Tweak the peer's last response time to be within the limits of the reachable duration @@ -203,16 +213,17 @@ fn not_so_recently_responded_peer_is_still_gossipable() { fn responded_long_ago_peer_is_not_gossipable() { let _init_guard = zebra_test::init(); + let instant_now = Instant::now(); let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) - .into_new_meta_addr() - .expect("MetaAddrChange can't create a new MetaAddr"); + .into_new_meta_addr(instant_now, local_now); // Create a peer that has responded let mut peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) - .apply_to_meta_addr(peer_seed) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) .expect("Failed to create MetaAddr for responded peer"); // Tweak the peer's last response time to be outside the limits of the reachable duration @@ -227,3 +238,210 @@ fn responded_long_ago_peer_is_not_gossipable() { assert!(!peer.is_active_for_gossip(chrono_now)); } + +/// Test that a change that is delayed for a long time is not applied to the address state. +#[test] +fn long_delayed_change_is_not_applied() { + let _init_guard = zebra_test::init(); + + let instant_now = Instant::now(); + let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + + let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); + let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) + .into_new_meta_addr(instant_now, local_now); + + // Create a peer that has responded + let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) + .expect("Failed to create MetaAddr for responded peer"); + + // Create an earlier change to Failed that has been delayed a long time. + // Failed typically comes after Responded, so it will pass the connection progress check. 
+ // + // This is very unlikely in the May 2023 production code, + // but it can happen due to getting the time, then waiting for the address book mutex. + + // Create some change times that are much earlier + let instant_early = instant_now - (CONCURRENT_ADDRESS_CHANGE_PERIOD * 3); + let chrono_early = chrono_now + - chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD * 3) + .expect("constant is valid"); + + let change = MetaAddr::new_errored(address, PeerServices::NODE_NETWORK); + let outcome = change.apply_to_meta_addr(peer, instant_early, chrono_early); + + assert_eq!( + outcome, None, + "\n\ + unexpected application of a much earlier change to a peer:\n\ + change: {change:?}\n\ + times: {instant_early:?} {chrono_early}\n\ + peer: {peer:?}" + ); +} + +/// Test that a change that happens a long time after the previous change +/// is applied to the address state, even if it is a revert. +#[test] +fn later_revert_change_is_applied() { + let _init_guard = zebra_test::init(); + + let instant_now = Instant::now(); + let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + + let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); + let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) + .into_new_meta_addr(instant_now, local_now); + + // Create a peer that has responded + let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) + .expect("Failed to create MetaAddr for responded peer"); + + // Create an earlier change to AttemptPending that happens a long time later. + // AttemptPending typically comes before Responded, so it will fail the connection progress + // check, but that failure should be ignored because it is not concurrent. + // + // This is a typical reconnect in production. + + // Create some change times that are much later + let instant_late = instant_now + (CONCURRENT_ADDRESS_CHANGE_PERIOD * 3); + let chrono_late = chrono_now + + chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD * 3) + .expect("constant is valid"); + + let change = MetaAddr::new_reconnect(address); + let outcome = change.apply_to_meta_addr(peer, instant_late, chrono_late); + + assert!( + outcome.is_some(), + "\n\ + unexpected skipped much later change to a peer:\n\ + change: {change:?}\n\ + times: {instant_late:?} {chrono_late}\n\ + peer: {peer:?}" + ); +} + +/// Test that a concurrent change which reverses the connection state is not applied. +#[test] +fn concurrent_state_revert_change_is_not_applied() { + let _init_guard = zebra_test::init(); + + let instant_now = Instant::now(); + let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + + let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); + let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) + .into_new_meta_addr(instant_now, local_now); + + // Create a peer that has responded + let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) + .expect("Failed to create MetaAddr for responded peer"); + + // Create a concurrent change to AttemptPending. + // AttemptPending typically comes before Responded, so it will fail the progress check. + // + // This is likely to happen in production, it just requires a short delay in the earlier change. 
+ + // Create some change times that are earlier but concurrent + let instant_early = instant_now - (CONCURRENT_ADDRESS_CHANGE_PERIOD / 2); + let chrono_early = chrono_now + - chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD / 2) + .expect("constant is valid"); + + let change = MetaAddr::new_reconnect(address); + let outcome = change.apply_to_meta_addr(peer, instant_early, chrono_early); + + assert_eq!( + outcome, None, + "\n\ + unexpected application of an early concurrent change to a peer:\n\ + change: {change:?}\n\ + times: {instant_early:?} {chrono_early}\n\ + peer: {peer:?}" + ); + + // Create some change times that are later but concurrent + let instant_late = instant_now + (CONCURRENT_ADDRESS_CHANGE_PERIOD / 2); + let chrono_late = chrono_now + + chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD / 2) + .expect("constant is valid"); + + let change = MetaAddr::new_reconnect(address); + let outcome = change.apply_to_meta_addr(peer, instant_late, chrono_late); + + assert_eq!( + outcome, None, + "\n\ + unexpected application of a late concurrent change to a peer:\n\ + change: {change:?}\n\ + times: {instant_late:?} {chrono_late}\n\ + peer: {peer:?}" + ); +} + +/// Test that a concurrent change which progresses the connection state is applied. +#[test] +fn concurrent_state_progress_change_is_applied() { + let _init_guard = zebra_test::init(); + + let instant_now = Instant::now(); + let chrono_now = Utc::now(); + let local_now: DateTime32 = chrono_now.try_into().expect("will succeed until 2038"); + + let address = PeerSocketAddr::from(([192, 168, 180, 9], 10_000)); + let peer_seed = MetaAddr::new_alternate(address, &PeerServices::NODE_NETWORK) + .into_new_meta_addr(instant_now, local_now); + + // Create a peer that has responded + let peer = MetaAddr::new_responded(address, &PeerServices::NODE_NETWORK) + .apply_to_meta_addr(peer_seed, instant_now, chrono_now) + .expect("Failed to create MetaAddr for responded peer"); + + // Create a concurrent change to Failed. + // Failed typically comes after Responded, so it will pass the progress check. + // + // This is a typical update in production. 
+ + // Create some change times that are earlier but concurrent + let instant_early = instant_now - (CONCURRENT_ADDRESS_CHANGE_PERIOD / 2); + let chrono_early = chrono_now + - chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD / 2) + .expect("constant is valid"); + + let change = MetaAddr::new_errored(address, None); + let outcome = change.apply_to_meta_addr(peer, instant_early, chrono_early); + + assert!( + outcome.is_some(), + "\n\ + unexpected skipped early concurrent change to a peer:\n\ + change: {change:?}\n\ + times: {instant_early:?} {chrono_early}\n\ + peer: {peer:?}" + ); + + // Create some change times that are later but concurrent + let instant_late = instant_now + (CONCURRENT_ADDRESS_CHANGE_PERIOD / 2); + let chrono_late = chrono_now + + chrono::Duration::from_std(CONCURRENT_ADDRESS_CHANGE_PERIOD / 2) + .expect("constant is valid"); + + let change = MetaAddr::new_errored(address, None); + let outcome = change.apply_to_meta_addr(peer, instant_late, chrono_late); + + assert!( + outcome.is_some(), + "\n\ + unexpected skipped late concurrent change to a peer:\n\ + change: {change:?}\n\ + times: {instant_late:?} {chrono_late}\n\ + peer: {peer:?}" + ); +} diff --git a/zebra-network/src/peer/client.rs b/zebra-network/src/peer/client.rs index f264cc5ff98..69940275414 100644 --- a/zebra-network/src/peer/client.rs +++ b/zebra-network/src/peer/client.rs @@ -543,10 +543,14 @@ impl Client { // Prevent any senders from sending more messages to this peer. self.server_tx.close_channel(); - // Stop the heartbeat task + // Ask the heartbeat task to stop. if let Some(shutdown_tx) = self.shutdown_tx.take() { let _ = shutdown_tx.send(CancelHeartbeatTask); } + + // Force the connection and heartbeat tasks to stop. + self.connection_task.abort(); + self.heartbeat_task.abort(); } } diff --git a/zebra-network/src/peer/connection.rs b/zebra-network/src/peer/connection.rs index 20429cc7353..2266085812e 100644 --- a/zebra-network/src/peer/connection.rs +++ b/zebra-network/src/peer/connection.rs @@ -7,15 +7,16 @@ //! And it's unclear if these assumptions match the `zcashd` implementation. //! It should be refactored into a cleaner set of request/response pairs (#1515). -use std::{borrow::Cow, collections::HashSet, fmt, pin::Pin, sync::Arc}; +use std::{borrow::Cow, collections::HashSet, fmt, pin::Pin, sync::Arc, time::Instant}; use futures::{ future::{self, Either}, prelude::*, stream::Stream, }; +use rand::{thread_rng, Rng}; use tokio::time::{sleep, Sleep}; -use tower::Service; +use tower::{Service, ServiceExt}; use tracing_futures::Instrument; use zebra_chain::{ @@ -25,7 +26,10 @@ use zebra_chain::{ }; use crate::{ - constants, + constants::{ + self, MAX_OVERLOAD_DROP_PROBABILITY, MIN_OVERLOAD_DROP_PROBABILITY, + OVERLOAD_PROTECTION_INTERVAL, + }, meta_addr::MetaAddr, peer::{ connection::peer_tx::PeerTx, error::AlreadyErrored, ClientRequest, ClientRequestReceiver, @@ -226,11 +230,11 @@ impl Handler { if missing_transaction_ids != pending_ids { trace!(?missing_invs, ?missing_transaction_ids, ?pending_ids); // if these errors are noisy, we should replace them with debugs - info!("unexpected notfound message from peer: all remaining transaction hashes should be listed in the notfound. Using partial received transactions as the peer response"); + debug!("unexpected notfound message from peer: all remaining transaction hashes should be listed in the notfound. 
Using partial received transactions as the peer response"); } if missing_transaction_ids.len() != missing_invs.len() { trace!(?missing_invs, ?missing_transaction_ids, ?pending_ids); - info!("unexpected notfound message from peer: notfound contains duplicate hashes or non-transaction hashes. Using partial received transactions as the peer response"); + debug!("unexpected notfound message from peer: notfound contains duplicate hashes or non-transaction hashes. Using partial received transactions as the peer response"); } if transactions.is_empty() { @@ -330,11 +334,11 @@ impl Handler { if missing_blocks != pending_hashes { trace!(?missing_invs, ?missing_blocks, ?pending_hashes); // if these errors are noisy, we should replace them with debugs - info!("unexpected notfound message from peer: all remaining block hashes should be listed in the notfound. Using partial received blocks as the peer response"); + debug!("unexpected notfound message from peer: all remaining block hashes should be listed in the notfound. Using partial received blocks as the peer response"); } if missing_blocks.len() != missing_invs.len() { trace!(?missing_invs, ?missing_blocks, ?pending_hashes); - info!("unexpected notfound message from peer: notfound contains duplicate hashes or non-block hashes. Using partial received blocks as the peer response"); + debug!("unexpected notfound message from peer: notfound contains duplicate hashes or non-block hashes. Using partial received blocks as the peer response"); } if blocks.is_empty() { @@ -447,7 +451,10 @@ impl From for InboundMessage { } /// The channels, services, and associated state for a peer connection. -pub struct Connection { +pub struct Connection +where + Tx: Sink + Unpin, +{ /// The metadata for the connected peer `service`. /// /// This field is used for debugging. @@ -508,9 +515,17 @@ pub struct Connection { /// The state for this peer, when the metrics were last updated. pub(super) last_metrics_state: Option>, + + /// The time of the last overload error response from the inbound + /// service to a request from this connection, + /// or None if this connection hasn't yet received an overload error. + last_overload_time: Option, } -impl fmt::Debug for Connection { +impl fmt::Debug for Connection +where + Tx: Sink + Unpin, +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // skip the channels, they don't tell us anything useful f.debug_struct(std::any::type_name::>()) @@ -525,7 +540,10 @@ impl fmt::Debug for Connection { } } -impl Connection { +impl Connection +where + Tx: Sink + Unpin, +{ /// Return a new connection from its channels, services, and shared state. pub(crate) fn new( inbound_service: S, @@ -549,6 +567,7 @@ impl Connection { connection_tracker, metrics_label, last_metrics_state: None, + last_overload_time: None, } } } @@ -635,9 +654,9 @@ where // the request completes (or times out). match future::select(peer_rx.next(), self.client_rx.next()).await { Either::Left((None, _)) => { - self.fail_with(PeerError::ConnectionClosed); + self.fail_with(PeerError::ConnectionClosed).await; } - Either::Left((Some(Err(e)), _)) => self.fail_with(e), + Either::Left((Some(Err(e)), _)) => self.fail_with(e).await, Either::Left((Some(Ok(msg)), _)) => { let unhandled_msg = self.handle_message_as_request(msg).await; @@ -653,7 +672,8 @@ where // There are no requests to be flushed, // but we need to set an error and update metrics. - self.shutdown(PeerError::ClientDropped); + // (We don't want to log this error, because it's normal behaviour.) 
+ self.shutdown_async(PeerError::ClientDropped).await; break; } Either::Right((Some(req), _)) => { @@ -743,8 +763,10 @@ where .instrument(span.clone()) .await { - Either::Right((None, _)) => self.fail_with(PeerError::ConnectionClosed), - Either::Right((Some(Err(e)), _)) => self.fail_with(e), + Either::Right((None, _)) => { + self.fail_with(PeerError::ConnectionClosed).await + } + Either::Right((Some(Err(e)), _)) => self.fail_with(e).await, Either::Right((Some(Ok(peer_msg)), _cancel)) => { self.update_state_metrics(format!("Out::Rsp::{}", peer_msg.command())); @@ -803,7 +825,7 @@ where // So we do the state request cleanup manually. let e = SharedPeerError::from(e); let _ = tx.send(Err(e.clone())); - self.fail_with(e); + self.fail_with(e).await; State::Failed } // Other request timeouts fail the request. @@ -830,6 +852,8 @@ where } } + // TODO: close peer_rx here, after changing it from a stream to a channel + let error = self.error_slot.try_get_error(); assert!( error.is_some(), @@ -839,18 +863,21 @@ where self.update_state_metrics(error.expect("checked is_some").to_string()); } - /// Fail this connection. + /// Fail this connection, log the failure, and shut it down. + /// See [`Self::shutdown_async()`] for details. /// - /// If the connection has errored already, re-use the original error. - /// Otherwise, fail the connection with `error`. - fn fail_with(&mut self, error: impl Into) { + /// Use [`Self::shutdown_async()`] to avoid logging the failure, + /// and [`Self::shutdown()`] from non-async code. + async fn fail_with(&mut self, error: impl Into) { let error = error.into(); - debug!(%error, - client_receiver = ?self.client_rx, - "failing peer service with error"); + debug!( + %error, + client_receiver = ?self.client_rx, + "failing peer service with error" + ); - self.shutdown(error); + self.shutdown_async(error).await; } /// Handle an internal client request, possibly generating outgoing messages to the @@ -1042,7 +1069,7 @@ where Err(error) => { let error = SharedPeerError::from(error); let _ = tx.send(Err(error.clone())); - self.fail_with(error); + self.fail_with(error).await; } }; } @@ -1065,17 +1092,17 @@ where Message::Ping(nonce) => { trace!(?nonce, "responding to heartbeat"); if let Err(e) = self.peer_tx.send(Message::Pong(nonce)).await { - self.fail_with(e); + self.fail_with(e).await; } Consumed } // These messages shouldn't be sent outside of a handshake. Message::Version { .. } => { - self.fail_with(PeerError::DuplicateHandshake); + self.fail_with(PeerError::DuplicateHandshake).await; Consumed } Message::Verack { .. } => { - self.fail_with(PeerError::DuplicateHandshake); + self.fail_with(PeerError::DuplicateHandshake).await; Consumed } // These messages should already be handled as a response if they @@ -1242,7 +1269,6 @@ where /// of connected peers. async fn drive_peer_request(&mut self, req: Request) { trace!(?req); - use tower::{load_shed::error::Overloaded, ServiceExt}; // Add a metric for inbound requests metrics::counter!( @@ -1257,30 +1283,41 @@ where // before sending the next inbound request. tokio::task::yield_now().await; + // # Security + // + // Holding buffer slots for a long time can cause hangs: + // + // + // The inbound service must be called immediately after a buffer slot is reserved. 
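The security note above is the standard `tower` discipline: capacity reserved by `ready()` (a buffer slot, a load-shed token) must be spent immediately with `call()`, because holding a readied shared service across other `.await` points keeps that slot reserved and can hang every other caller. A hedged sketch of the pattern, with placeholder generics rather than Zebra's request types:

```rust
use tower::{Service, ServiceExt};

/// Reserve capacity and use it right away. Splitting `ready()` and `call()`
/// across other `.await` points is what the security comment above warns
/// against. `Req` is a placeholder request type, not a Zebra type.
async fn call_immediately<S, Req>(svc: &mut S, req: Req) -> Result<S::Response, S::Error>
where
    S: Service<Req>,
{
    svc.ready().await?.call(req).await
}
```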
if self.svc.ready().await.is_err() { - // Treat all service readiness errors as Overloaded - // TODO: treat `TryRecvError::Closed` in `Inbound::poll_ready` as a fatal error (#1655) - self.fail_with(PeerError::Overloaded); + self.fail_with(PeerError::ServiceShutdown).await; return; } let rsp = match self.svc.call(req.clone()).await { Err(e) => { - if e.is::() { - tracing::info!( - remote_user_agent = ?self.connection_info.remote.user_agent, - negotiated_version = ?self.connection_info.negotiated_version, - peer = ?self.metrics_label, - last_peer_state = ?self.last_metrics_state, - // TODO: remove this detailed debug info once #6506 is fixed - remote_height = ?self.connection_info.remote.start_height, - cached_addrs = ?self.cached_addrs.len(), - connection_state = ?self.state, - "inbound service is overloaded, closing connection", - ); + if e.is::() { + // # Security + // + // The peer request queue must have a limited length. + // The buffer and load shed layers are added in `start::start()`. + tracing::debug!("inbound service is overloaded, may close connection"); + + let now = Instant::now(); + + self.handle_inbound_overload(req, now, PeerError::Overloaded) + .await; + } else if e.is::() { + // # Security + // + // Peer requests must have a timeout. + // The timeout layer is added in `start::start()`. + tracing::info!(%req, "inbound service request timed out, may close connection"); + + let now = Instant::now(); - metrics::counter!("pool.closed.loadshed", 1); - self.fail_with(PeerError::Overloaded); + self.handle_inbound_overload(req, now, PeerError::InboundTimeout) + .await; } else { // We could send a reject to the remote peer, but that might cause // them to disconnect, and we might be using them to sync blocks. @@ -1292,7 +1329,9 @@ where client_receiver = ?self.client_rx, "error processing peer request", ); + self.update_state_metrics(format!("In::Req::{}/Rsp::Error", req.command())); } + return; } Ok(rsp) => rsp, @@ -1307,11 +1346,12 @@ where ); self.update_state_metrics(format!("In::Rsp::{}", rsp.command())); + // TODO: split response handler into its own method match rsp.clone() { Response::Nil => { /* generic success, do nothing */ } Response::Peers(addrs) => { if let Err(e) = self.peer_tx.send(Message::Addr(addrs)).await { - self.fail_with(e); + self.fail_with(e).await; } } Response::Transactions(transactions) => { @@ -1323,7 +1363,7 @@ where match transaction { Available(transaction) => { if let Err(e) = self.peer_tx.send(Message::Tx(transaction)).await { - self.fail_with(e); + self.fail_with(e).await; return; } } @@ -1333,7 +1373,7 @@ where if !missing_ids.is_empty() { if let Err(e) = self.peer_tx.send(Message::NotFound(missing_ids)).await { - self.fail_with(e); + self.fail_with(e).await; return; } } @@ -1347,7 +1387,7 @@ where match block { Available(block) => { if let Err(e) = self.peer_tx.send(Message::Block(block)).await { - self.fail_with(e); + self.fail_with(e).await; return; } } @@ -1357,7 +1397,7 @@ where if !missing_hashes.is_empty() { if let Err(e) = self.peer_tx.send(Message::NotFound(missing_hashes)).await { - self.fail_with(e); + self.fail_with(e).await; return; } } @@ -1368,12 +1408,12 @@ where .send(Message::Inv(hashes.into_iter().map(Into::into).collect())) .await { - self.fail_with(e) + self.fail_with(e).await } } Response::BlockHeaders(headers) => { if let Err(e) = self.peer_tx.send(Message::Headers(headers)).await { - self.fail_with(e) + self.fail_with(e).await } } Response::TransactionIds(hashes) => { @@ -1401,7 +1441,7 @@ where .collect(); if let Err(e) = 
self.peer_tx.send(Message::Inv(hashes)).await { - self.fail_with(e) + self.fail_with(e).await } } } @@ -1412,9 +1452,106 @@ where // before checking the connection for the next inbound or outbound request. tokio::task::yield_now().await; } + + /// Handle inbound service overload and timeout error responses by randomly terminating some + /// connections. + /// + /// # Security + /// + /// When the inbound service is overloaded with requests, Zebra needs to drop some connections, + /// to reduce the load on the application. But dropping every connection that receives an + /// `Overloaded` error from the inbound service could cause Zebra to drop too many peer + /// connections, and stop itself downloading blocks or transactions. + /// + /// Malicious or misbehaving peers can also overload the inbound service, and make Zebra drop + /// its connections to other peers. + /// + /// So instead, Zebra drops some overloaded connections at random. If a connection has recently + /// overloaded the inbound service, it is more likely to be dropped. This makes it harder for a + /// single peer (or multiple peers) to perform a denial of service attack. + /// + /// The inbound connection rate-limit also makes it hard for multiple peers to perform this + /// attack, because each inbound connection can only send one inbound request before its + /// probability of being disconnected increases. + async fn handle_inbound_overload(&mut self, req: Request, now: Instant, error: PeerError) { + let prev = self.last_overload_time.replace(now); + let drop_connection_probability = overload_drop_connection_probability(now, prev); + + if thread_rng().gen::() < drop_connection_probability { + if matches!(error, PeerError::Overloaded) { + metrics::counter!("pool.closed.loadshed", 1); + } else { + metrics::counter!("pool.closed.inbound.timeout", 1); + } + + tracing::info!( + drop_connection_probability = format!("{drop_connection_probability:.3}"), + remote_user_agent = ?self.connection_info.remote.user_agent, + negotiated_version = ?self.connection_info.negotiated_version, + peer = ?self.metrics_label, + last_peer_state = ?self.last_metrics_state, + // TODO: remove this detailed debug info once #6506 is fixed + remote_height = ?self.connection_info.remote.start_height, + cached_addrs = ?self.cached_addrs.len(), + connection_state = ?self.state, + "inbound service {error} error, closing connection", + ); + + self.update_state_metrics(format!("In::Req::{}/Rsp::{error}::Error", req.command())); + self.fail_with(error).await; + } else { + self.update_state_metrics(format!("In::Req::{}/Rsp::{error}::Ignored", req.command())); + + if matches!(error, PeerError::Overloaded) { + metrics::counter!("pool.ignored.loadshed", 1); + } else { + metrics::counter!("pool.ignored.inbound.timeout", 1); + } + } + } } -impl Connection { +/// Returns the probability of dropping a connection where the last overload was at `prev`, +/// and the current overload is `now`. +/// +/// # Security +/// +/// Connections that haven't seen an overload error in the past OVERLOAD_PROTECTION_INTERVAL +/// have a small chance of being closed (MIN_OVERLOAD_DROP_PROBABILITY). +/// +/// Connections that have seen a previous overload error in that time +/// have a higher chance of being dropped up to MAX_OVERLOAD_DROP_PROBABILITY. +/// This probability increases quadratically, so peers that send lots of inbound +/// requests are more likely to be dropped. 
+/// +/// ## Examples +/// +/// If a connection sends multiple overloads close together, it is very likely to be +/// disconnected. If a connection has two overloads multiple seconds apart, it is unlikely +/// to be disconnected. +fn overload_drop_connection_probability(now: Instant, prev: Option) -> f32 { + let Some(prev) = prev else { + return MIN_OVERLOAD_DROP_PROBABILITY; + }; + + let protection_fraction_since_last_overload = + (now - prev).as_secs_f32() / OVERLOAD_PROTECTION_INTERVAL.as_secs_f32(); + + // Quadratically increase the disconnection probability for very recent overloads. + // Negative values are ignored by clamping to MIN_OVERLOAD_DROP_PROBABILITY. + let overload_fraction = protection_fraction_since_last_overload.powi(2); + + let probability_range = MAX_OVERLOAD_DROP_PROBABILITY - MIN_OVERLOAD_DROP_PROBABILITY; + let raw_drop_probability = + MAX_OVERLOAD_DROP_PROBABILITY - (overload_fraction * probability_range); + + raw_drop_probability.clamp(MIN_OVERLOAD_DROP_PROBABILITY, MAX_OVERLOAD_DROP_PROBABILITY) +} + +impl Connection +where + Tx: Sink + Unpin, +{ /// Update the connection state metrics for this connection, /// using `extra_state_info` as additional state information. fn update_state_metrics(&mut self, extra_state_info: impl Into>) { @@ -1453,18 +1590,32 @@ impl Connection { } } - /// Marks the peer as having failed with `error`, and performs connection cleanup. + /// Marks the peer as having failed with `error`, and performs connection cleanup, + /// including async channel closes. /// /// If the connection has errored already, re-use the original error. /// Otherwise, fail the connection with `error`. + async fn shutdown_async(&mut self, error: impl Into) { + // Close async channels first, so other tasks can start shutting down. + // There's nothing we can do about errors while shutting down, and some errors are expected. + // + // TODO: close peer_tx and peer_rx in shutdown() and Drop, after: + // - using channels instead of streams/sinks? + // - exposing the underlying implementation rather than using generics and closures? + // - adding peer_rx to the connection struct (optional) + let _ = self.peer_tx.close().await; + + self.shutdown(error); + } + + /// Marks the peer as having failed with `error`, and performs connection cleanup. + /// See [`Self::shutdown_async()`] for details. + /// + /// Call [`Self::shutdown_async()`] in async code, because it can shut down more channels. fn shutdown(&mut self, error: impl Into) { let mut error = error.into(); // Close channels first, so other tasks can start shutting down. - // - // TODO: close peer_tx and peer_rx, after: - // - adapting them using a struct with a Stream impl, rather than closures - // - making the struct forward `close` to the inner channel self.client_rx.close(); // Update the shared error slot @@ -1532,7 +1683,10 @@ impl Connection { } } -impl Drop for Connection { +impl Drop for Connection +where + Tx: Sink + Unpin, +{ fn drop(&mut self) { self.shutdown(PeerError::ConnectionDropped); diff --git a/zebra-network/src/peer/connection/peer_tx.rs b/zebra-network/src/peer/connection/peer_tx.rs index 7e17196d95d..47df6504903 100644 --- a/zebra-network/src/peer/connection/peer_tx.rs +++ b/zebra-network/src/peer/connection/peer_tx.rs @@ -1,6 +1,6 @@ //! The peer message sender channel. 
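Before the `PeerTx` changes, it may help to see the drop-probability curve from `overload_drop_connection_probability` with concrete numbers. This sketch restates the same quadratic interpolation over an elapsed time; the constants are hypothetical stand-ins, while Zebra's real values live in `zebra-network`'s `constants` module:

```rust
use std::time::Duration;

// Hypothetical illustration values, not Zebra's actual constants.
const MIN_DROP: f32 = 0.05;
const MAX_DROP: f32 = 0.95;
const PROTECTION_INTERVAL: Duration = Duration::from_secs(5);

/// The same interpolation as `overload_drop_connection_probability`,
/// restated over the elapsed time since the previous overload.
fn drop_probability(since_last_overload: Duration) -> f32 {
    let fraction =
        since_last_overload.as_secs_f32() / PROTECTION_INTERVAL.as_secs_f32();
    let raw = MAX_DROP - fraction.powi(2) * (MAX_DROP - MIN_DROP);
    raw.clamp(MIN_DROP, MAX_DROP)
}

fn main() {
    // Back-to-back overloads are dropped with the maximum probability...
    assert_eq!(drop_probability(Duration::ZERO), MAX_DROP);

    // ...the probability falls off quadratically inside the interval...
    let halfway = drop_probability(PROTECTION_INTERVAL / 2);
    assert!(MIN_DROP < halfway && halfway < MAX_DROP);

    // ...and overloads spaced past the interval get the minimum probability.
    assert_eq!(drop_probability(PROTECTION_INTERVAL), MIN_DROP);
}
```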
-use futures::{Sink, SinkExt}; +use futures::{FutureExt, Sink, SinkExt}; use zebra_chain::serialization::SerializationError; @@ -10,7 +10,10 @@ use crate::{constants::REQUEST_TIMEOUT, protocol::external::Message, PeerError}; /// /// Used to apply a timeout to send messages. #[derive(Clone, Debug)] -pub struct PeerTx { +pub struct PeerTx +where + Tx: Sink + Unpin, +{ /// A channel for sending Zcash messages to the connected peer. /// /// This channel accepts [`Message`]s. @@ -28,10 +31,28 @@ where .map_err(|_| PeerError::ConnectionSendTimeout)? .map_err(Into::into) } + + /// Flush any remaining output and close this [`PeerTx`], if necessary. + pub async fn close(&mut self) -> Result<(), SerializationError> { + self.inner.close().await + } } -impl From for PeerTx { +impl From for PeerTx +where + Tx: Sink + Unpin, +{ fn from(tx: Tx) -> Self { PeerTx { inner: tx } } } + +impl Drop for PeerTx +where + Tx: Sink + Unpin, +{ + fn drop(&mut self) { + // Do a last-ditch close attempt on the sink + self.close().now_or_never(); + } +} diff --git a/zebra-network/src/peer/connection/tests/prop.rs b/zebra-network/src/peer/connection/tests/prop.rs index 9fc0390a981..3c4b2d51ca2 100644 --- a/zebra-network/src/peer/connection/tests/prop.rs +++ b/zebra-network/src/peer/connection/tests/prop.rs @@ -112,7 +112,7 @@ proptest! { // Check the state after the response let error = shared_error_slot.try_get_error(); - assert!(matches!(error, None)); + assert!(error.is_none()); inbound_service.expect_no_requests().await?; diff --git a/zebra-network/src/peer/connection/tests/vectors.rs b/zebra-network/src/peer/connection/tests/vectors.rs index 85ac7c854d1..4ab4db7af0a 100644 --- a/zebra-network/src/peer/connection/tests/vectors.rs +++ b/zebra-network/src/peer/connection/tests/vectors.rs @@ -4,22 +4,27 @@ //! - inbound message as request //! 
- inbound message, but not a request (or a response) -use std::{collections::HashSet, task::Poll, time::Duration}; +use std::{ + collections::HashSet, + task::Poll, + time::{Duration, Instant}, +}; use futures::{ channel::{mpsc, oneshot}, sink::SinkMapErr, - FutureExt, StreamExt, + FutureExt, SinkExt, StreamExt, }; - +use tower::load_shed::error::Overloaded; use tracing::Span; + use zebra_chain::serialization::SerializationError; use zebra_test::mock_service::{MockService, PanicAssertion}; use crate::{ - constants::REQUEST_TIMEOUT, + constants::{MAX_OVERLOAD_DROP_PROBABILITY, MIN_OVERLOAD_DROP_PROBABILITY, REQUEST_TIMEOUT}, peer::{ - connection::{Connection, State}, + connection::{overload_drop_connection_probability, Connection, State}, ClientRequest, ErrorSlot, }, protocol::external::Message, @@ -656,6 +661,230 @@ async fn connection_run_loop_receive_timeout() { assert_eq!(outbound_message, None); } +/// Check basic properties of overload probabilities +#[test] +fn overload_probability_reduces_over_time() { + let now = Instant::now(); + + // Edge case: previous is in the future due to OS monotonic clock bugs + let prev = now + Duration::from_secs(1); + assert_eq!( + overload_drop_connection_probability(now, Some(prev)), + MAX_OVERLOAD_DROP_PROBABILITY, + "if the overload time is in the future (OS bugs?), it should have maximum drop probability", + ); + + // Overload/DoS case/edge case: rapidly repeated overloads + let prev = now; + assert_eq!( + overload_drop_connection_probability(now, Some(prev)), + MAX_OVERLOAD_DROP_PROBABILITY, + "if the overload times are the same, overloads should have maximum drop probability", + ); + + // Overload/DoS case: rapidly repeated overloads + let prev = now - Duration::from_micros(1); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability <= MAX_OVERLOAD_DROP_PROBABILITY, + "if the overloads are very close together, drops can optionally decrease: {drop_probability} <= {MAX_OVERLOAD_DROP_PROBABILITY}", + ); + assert!( + MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.001, + "if the overloads are very close together, drops can only decrease slightly: {drop_probability}", + ); + let last_probability = drop_probability; + + // Overload/DoS case: rapidly repeated overloads + let prev = now - Duration::from_millis(1); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability < last_probability, + "if the overloads decrease, drops should decrease: {drop_probability} < {last_probability}", + ); + assert!( + MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.001, + "if the overloads are very close together, drops can only decrease slightly: {drop_probability}", + ); + let last_probability = drop_probability; + + // Overload/DoS case: rapidly repeated overloads + let prev = now - Duration::from_millis(10); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability < last_probability, + "if the overloads decrease, drops should decrease: {drop_probability} < {last_probability}", + ); + assert!( + MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.001, + "if the overloads are very close together, drops can only decrease slightly: {drop_probability}", + ); + let last_probability = drop_probability; + + // Overload case: frequent overloads + let prev = now - Duration::from_millis(100); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability < 
last_probability, + "if the overloads decrease, drops should decrease: {drop_probability} < {last_probability}", + ); + assert!( + MAX_OVERLOAD_DROP_PROBABILITY - drop_probability < 0.01, + "if the overloads are very close together, drops can only decrease slightly: {drop_probability}", + ); + let last_probability = drop_probability; + + // Overload case: occasional but repeated overloads + let prev = now - Duration::from_secs(1); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability < last_probability, + "if the overloads decrease, drops should decrease: {drop_probability} < {last_probability}", + ); + assert!( + MAX_OVERLOAD_DROP_PROBABILITY - drop_probability > 0.4, + "if the overloads are distant, drops should decrease a lot: {drop_probability}", + ); + let last_probability = drop_probability; + + // Overload case: occasional overloads + let prev = now - Duration::from_secs(5); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert!( + drop_probability < last_probability, + "if the overloads decrease, drops should decrease: {drop_probability} < {last_probability}", + ); + assert_eq!( + drop_probability, MIN_OVERLOAD_DROP_PROBABILITY, + "if overloads are far apart, drops should have minimum drop probability: {drop_probability}", + ); + let _last_probability = drop_probability; + + // Base case: infrequent overloads + let prev = now - Duration::from_secs(10); + let drop_probability = overload_drop_connection_probability(now, Some(prev)); + assert_eq!( + drop_probability, MIN_OVERLOAD_DROP_PROBABILITY, + "if overloads are far apart, drops should have minimum drop probability: {drop_probability}", + ); + + // Base case: no previous overload + let drop_probability = overload_drop_connection_probability(now, None); + assert_eq!( + drop_probability, MIN_OVERLOAD_DROP_PROBABILITY, + "if there is no previous overload time, overloads should have minimum drop probability: {drop_probability}", + ); +} + +/// Test that connections are randomly terminated in response to `Overloaded` errors. +/// +/// TODO: do a similar test on the real service stack created in the `start` command. +#[tokio::test(flavor = "multi_thread")] +async fn connection_is_randomly_disconnected_on_overload() { + let _init_guard = zebra_test::init(); + + // The number of times we repeat the test + const TEST_RUNS: usize = 220; + // The expected number of tests before a test failure due to random chance. + // Based on 10 tests per PR, 100 PR pushes per week, 50 weeks per year. + const TESTS_BEFORE_FAILURE: f32 = 50_000.0; + + let test_runs = TEST_RUNS.try_into().expect("constant fits in i32"); + // The probability of random test failure is: + // MIN_OVERLOAD_DROP_PROBABILITY^TEST_RUNS + MAX_OVERLOAD_DROP_PROBABILITY^TEST_RUNS + assert!( + 1.0 / MIN_OVERLOAD_DROP_PROBABILITY.powi(test_runs) > TESTS_BEFORE_FAILURE, + "not enough test runs: failures must be frequent enough to happen in almost all tests" + ); + assert!( + 1.0 / MAX_OVERLOAD_DROP_PROBABILITY.powi(test_runs) > TESTS_BEFORE_FAILURE, + "not enough test runs: successes must be frequent enough to happen in almost all tests" + ); + + let mut connection_continues = 0; + let mut connection_closes = 0; + + for _ in 0..TEST_RUNS { + // The real stream and sink are from a split TCP connection, + // but that doesn't change how the state machine behaves. 
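The `TEST_RUNS` and `TESTS_BEFORE_FAILURE` reasoning above can be checked numerically: the test only fails spuriously when all runs agree, so both "no run ever closed" and "every run closed" must be far rarer than one in the expected number of test executions. A self-contained sketch with hypothetical drop probabilities (the real bounds come from `MIN_OVERLOAD_DROP_PROBABILITY` and `MAX_OVERLOAD_DROP_PROBABILITY`); the test body resumes below:

```rust
fn main() {
    // Hypothetical per-run drop probabilities, not Zebra's real constants.
    let min_drop: f64 = 0.05;
    let max_drop: f64 = 0.95;
    let runs = 220;

    // The test fails if no run closes: every run keeps its connection,
    // which happens with probability at most (1 - min_drop)^runs.
    let never_closes = (1.0 - min_drop).powi(runs);

    // The test also fails if every run closes, with probability at most
    // max_drop^runs.
    let always_closes = max_drop.powi(runs);

    println!("P(no run closes)    <= {never_closes:e}");
    println!("P(every run closes) <= {always_closes:e}");

    // Both bounds must beat the expected number of test executions.
    let tests_before_failure = 50_000.0;
    assert!(never_closes < 1.0 / tests_before_failure);
    assert!(always_closes < 1.0 / tests_before_failure);
}
```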
+
+        let (mut peer_tx, peer_rx) = mpsc::channel(1);
+
+        let (
+            connection,
+            _client_tx,
+            mut inbound_service,
+            mut peer_outbound_messages,
+            shared_error_slot,
+        ) = new_test_connection();
+
+        // The connection hasn't run so it must not have errors
+        let error = shared_error_slot.try_get_error();
+        assert!(
+            error.is_none(),
+            "unexpected error before starting the connection event loop: {error:?}",
+        );
+
+        // Start the connection run loop future in a spawned task
+        let connection_handle = tokio::spawn(connection.run(peer_rx));
+        tokio::time::sleep(Duration::from_millis(1)).await;
+
+        // The connection hasn't received any messages, so it must not have errors
+        let error = shared_error_slot.try_get_error();
+        assert!(
+            error.is_none(),
+            "unexpected error before sending messages to the connection event loop: {error:?}",
+        );
+
+        // Simulate an overloaded connection error in response to an inbound request.
+        let inbound_req = Message::GetAddr;
+        peer_tx
+            .send(Ok(inbound_req))
+            .await
+            .expect("send to channel always succeeds");
+        tokio::time::sleep(Duration::from_millis(1)).await;
+
+        // The connection hasn't got a response, so it must not have errors
+        let error = shared_error_slot.try_get_error();
+        assert!(
+            error.is_none(),
+            "unexpected error before sending responses to the connection event loop: {error:?}",
+        );
+
+        inbound_service
+            .expect_request(Request::Peers)
+            .await
+            .respond_error(Overloaded::new().into());
+        tokio::time::sleep(Duration::from_millis(1)).await;
+
+        let outbound_result = peer_outbound_messages.try_next();
+        assert!(
+            !matches!(outbound_result, Ok(Some(_))),
+            "unexpected outbound message after Overloaded error:\n\
+             {outbound_result:?}\n\
+             note: TryRecvError means there are no messages, Ok(None) means the channel is closed"
+        );
+
+        let error = shared_error_slot.try_get_error();
+        if error.is_some() {
+            connection_closes += 1;
+        } else {
+            connection_continues += 1;
+        }
+
+        // We need to terminate the spawned task
+        connection_handle.abort();
+    }
+
+    assert!(
+        connection_closes > 0,
+        "some overloaded connections must be closed at random"
+    );
+    assert!(
+        connection_continues > 0,
+        "some overloaded errors must be ignored at random"
+    );
+}
+
 /// Creates a new [`Connection`] instance for unit tests.
 fn new_test_connection() -> (
     Connection<
diff --git a/zebra-network/src/peer/connector.rs b/zebra-network/src/peer/connector.rs
index dd2342c7929..67947f9e448 100644
--- a/zebra-network/src/peer/connector.rs
+++ b/zebra-network/src/peer/connector.rs
@@ -92,6 +92,10 @@ where
         let connected_addr = ConnectedAddr::new_outbound_direct(addr);
         let connector_span = info_span!("connector", peer = ?connected_addr);
+        // # Security
+        //
+        // `zebra_network::init()` implements a connection timeout on this future.
+        // Any code outside this future does not have a timeout.
         async move {
             let tcp_stream = TcpStream::connect(*addr).await?;
             let client = hs
diff --git a/zebra-network/src/peer/error.rs b/zebra-network/src/peer/error.rs
index 0180c377d6b..6263fb56119 100644
--- a/zebra-network/src/peer/error.rs
+++ b/zebra-network/src/peer/error.rs
@@ -82,6 +82,15 @@ pub enum PeerError {
     #[error("Internal services over capacity")]
     Overloaded,
+    /// This peer request caused an internal service timeout, so the connection was dropped
+    /// to shed load or prevent attacks.
+    #[error("Internal services timed out")]
+    InboundTimeout,
+
+    /// This node's internal services are no longer able to service requests.
+ #[error("Internal services have failed or shutdown")] + ServiceShutdown, + /// We requested data, but the peer replied with a `notfound` message. /// (Or it didn't respond before the request finished.) /// @@ -138,6 +147,8 @@ impl PeerError { PeerError::Serialization(inner) => format!("Serialization({inner})").into(), PeerError::DuplicateHandshake => "DuplicateHandshake".into(), PeerError::Overloaded => "Overloaded".into(), + PeerError::InboundTimeout => "InboundTimeout".into(), + PeerError::ServiceShutdown => "ServiceShutdown".into(), PeerError::NotFoundResponse(_) => "NotFoundResponse".into(), PeerError::NotFoundRegistry(_) => "NotFoundRegistry".into(), } diff --git a/zebra-network/src/peer/handshake.rs b/zebra-network/src/peer/handshake.rs index 40c07493d64..692c9f56135 100644 --- a/zebra-network/src/peer/handshake.rs +++ b/zebra-network/src/peer/handshake.rs @@ -876,6 +876,10 @@ where let relay = self.relay; let minimum_peer_version = self.minimum_peer_version.clone(); + // # Security + // + // `zebra_network::init()` implements a connection timeout on this future. + // Any code outside this future does not have a timeout. let fut = async move { debug!( addr = ?connected_addr, @@ -914,6 +918,10 @@ where // addresses. Otherwise, malicious peers could interfere with the // address book state of other peers by providing their addresses in // `Version` messages. + // + // New alternate peer address and peer responded updates are rate-limited because: + // - opening connections is rate-limited + // - we only send these messages once per handshake let alternate_addrs = connected_addr.get_alternate_addrs(remote_canonical_addr); for alt_addr in alternate_addrs { let alt_addr = MetaAddr::new_alternate(alt_addr, &remote_services); @@ -1010,18 +1018,10 @@ where "addr" => connected_addr.get_transient_addr_label(), ); - if let Some(book_addr) = connected_addr.get_address_book_addr() { - if matches!(msg, Message::Ping(_) | Message::Pong(_)) { - // the collector doesn't depend on network activity, - // so this await should not hang - let _ = inbound_ts_collector - .send(MetaAddr::new_responded( - book_addr, - &remote_services, - )) - .await; - } - } + // # Security + // + // Peer messages are not rate-limited, so we can't send anything + // to a shared channel or do anything expensive here. } Err(err) => { metrics::counter!( @@ -1031,6 +1031,12 @@ where "addr" => connected_addr.get_transient_addr_label(), ); + // # Security + // + // Peer errors are rate-limited because: + // - opening connections is rate-limited + // - the number of connections is limited + // - after the first error, the peer is disconnected if let Some(book_addr) = connected_addr.get_address_book_addr() { let _ = inbound_ts_collector .send(MetaAddr::new_errored(book_addr, remote_services)) @@ -1154,7 +1160,7 @@ pub(crate) async fn register_inventory_status( let _ = inv_collector .send(InventoryChange::new_available(*advertised, transient_addr)); } - [advertised @ ..] 
=> { + advertised => { let advertised = advertised .iter() .filter(|advertised| advertised.unmined_tx_id().is_some()); @@ -1295,6 +1301,20 @@ async fn send_periodic_heartbeats_run_loop( &remote_services, ) .await?; + + // # Security + // + // Peer heartbeats are rate-limited because: + // - opening connections is rate-limited + // - the number of connections is limited + // - Zebra initiates each heartbeat using a timer + if let Some(book_addr) = connected_addr.get_address_book_addr() { + // the collector doesn't depend on network activity, + // so this await should not hang + let _ = heartbeat_ts_collector + .send(MetaAddr::new_responded(book_addr, &remote_services)) + .await; + } } unreachable!("unexpected IntervalStream termination") @@ -1399,6 +1419,12 @@ where Err(err) => { tracing::debug!(?err, "heartbeat error, shutting down"); + // # Security + // + // Peer errors and shutdowns are rate-limited because: + // - opening connections is rate-limited + // - the number of connections is limited + // - after the first error or shutdown, the peer is disconnected if let Some(book_addr) = connected_addr.get_address_book_addr() { let _ = address_book_updater .send(MetaAddr::new_errored(book_addr, *remote_services)) diff --git a/zebra-network/src/peer_cache_updater.rs b/zebra-network/src/peer_cache_updater.rs new file mode 100644 index 00000000000..c213b87345d --- /dev/null +++ b/zebra-network/src/peer_cache_updater.rs @@ -0,0 +1,64 @@ +//! An async task that regularly updates the peer cache on disk from the current address book. + +use std::{ + io, + sync::{Arc, Mutex}, +}; + +use chrono::Utc; +use tokio::time::sleep; + +use crate::{ + constants::{DNS_LOOKUP_TIMEOUT, PEER_DISK_CACHE_UPDATE_INTERVAL}, + meta_addr::MetaAddr, + AddressBook, BoxError, Config, +}; + +/// An ongoing task that regularly caches the current `address_book` to disk, based on `config`. +#[instrument(skip(config, address_book))] +pub async fn peer_cache_updater( + config: Config, + address_book: Arc>, +) -> Result<(), BoxError> { + // Wait until we've queried DNS and (hopefully) sent peers to the address book. + // Ideally we'd wait for at least one peer crawl, but that makes tests very slow. + // + // TODO: turn the initial sleep time into a parameter of this function, + // and allow it to be set in tests + sleep(DNS_LOOKUP_TIMEOUT * 4).await; + + loop { + // Ignore errors because updating the cache is optional. + // Errors are already logged by the functions we're calling. + let _ = update_peer_cache_once(&config, &address_book).await; + + sleep(PEER_DISK_CACHE_UPDATE_INTERVAL).await; + } +} + +/// Caches peers from the current `address_book` to disk, based on `config`. +pub async fn update_peer_cache_once( + config: &Config, + address_book: &Arc>, +) -> io::Result<()> { + let peer_list = cacheable_peers(address_book) + .iter() + .map(|meta_addr| meta_addr.addr) + .collect(); + + config.update_peer_cache(peer_list).await +} + +/// Returns a list of cacheable peers, blocking for as short a time as possible. +fn cacheable_peers(address_book: &Arc>) -> Vec { + // TODO: use spawn_blocking() here, if needed to handle address book mutex load + let now = Utc::now(); + + // # Concurrency + // + // We return from this function immediately to make sure the address book is unlocked. 
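The TODO above mentions `tokio::task::spawn_blocking()`. If address book lock contention ever became a problem, moving the `std::sync::Mutex` lock onto a blocking thread would stop it stalling the async executor. A hedged sketch of that variant, generic over the shared data rather than the real `AddressBook` (the actual lock expression follows below):

```rust
use std::sync::{Arc, Mutex};

use tokio::task::spawn_blocking;

/// Take a `std::sync::Mutex` on a blocking thread, so a contended lock
/// can't block the async executor. The scaffolding here is illustrative;
/// only the pattern matches the TODO above.
async fn snapshot_on_blocking_thread<T>(shared: Arc<Mutex<Vec<T>>>) -> Vec<T>
where
    T: Clone + Send + 'static,
{
    spawn_blocking(move || {
        shared
            .lock()
            .expect("unexpected panic in a previous thread holding the lock")
            .clone()
    })
    .await
    .expect("blocking task panicked")
}
```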
+ address_book + .lock() + .expect("unexpected panic in previous thread while accessing the address book") + .cacheable(now) +} diff --git a/zebra-network/src/peer_set/candidate_set.rs b/zebra-network/src/peer_set/candidate_set.rs index 042a92b27aa..f951bda5b9b 100644 --- a/zebra-network/src/peer_set/candidate_set.rs +++ b/zebra-network/src/peer_set/candidate_set.rs @@ -8,7 +8,7 @@ use tokio::time::{sleep_until, timeout, Instant}; use tower::{Service, ServiceExt}; use tracing::Span; -use zebra_chain::serialization::DateTime32; +use zebra_chain::{diagnostic::task::WaitForPanics, serialization::DateTime32}; use crate::{ constants, meta_addr::MetaAddrChange, peer_set::set::MorePeers, types::MetaAddr, AddressBook, @@ -125,7 +125,11 @@ mod tests; // When we add the Seed state: // * show that seed peers that transition to other never attempted // states are already in the address book -pub(crate) struct CandidateSet { +pub(crate) struct CandidateSet +where + S: Service + Send, + S::Future: Send + 'static, +{ // Correctness: the address book must be private, // so all operations are performed on a blocking thread (see #1976). address_book: Arc>, @@ -136,7 +140,7 @@ pub(crate) struct CandidateSet { impl CandidateSet where - S: Service, + S: Service + Send, S::Future: Send + 'static, { /// Uses `address_book` and `peer_service` to manage a [`CandidateSet`] of peers. @@ -180,8 +184,6 @@ where /// The handshaker sets up the peer message receiver so it also sends a /// [`Responded`] peer address update. /// - /// [`report_failed`][Self::report_failed] puts peers into the [`Failed`] state. - /// /// [`next`][Self::next] puts peers into the [`AttemptPending`] state. /// /// ## Security @@ -316,6 +318,12 @@ where /// Add new `addrs` to the address book. async fn send_addrs(&self, addrs: impl IntoIterator) { + // # Security + // + // New gossiped peers are rate-limited because: + // - Zebra initiates requests for new gossiped peers + // - the fanout is limited + // - the number of addresses per peer is limited let addrs: Vec = addrs .into_iter() .map(MetaAddr::new_gossiped_change) @@ -340,8 +348,8 @@ where tokio::task::spawn_blocking(move || { span.in_scope(|| address_book.lock().unwrap().extend(addrs)) }) + .wait_for_panics() .await - .expect("panic in new peers address book update task"); } /// Returns the next candidate for a connection attempt, if any are available. @@ -395,8 +403,8 @@ where // Correctness: Spawn address book accesses on a blocking thread, to avoid deadlocks (see #1976). let span = Span::current(); let next_peer = tokio::task::spawn_blocking(move || span.in_scope(next_peer)) - .await - .expect("panic in next peer address book task")?; + .wait_for_panics() + .await?; // Security: rate-limit new outbound peer connections sleep_until(self.min_next_handshake).await; @@ -405,21 +413,9 @@ where Some(next_peer) } - /// Mark `addr` as a failed peer. - pub async fn report_failed(&mut self, addr: &MetaAddr) { - let addr = MetaAddr::new_errored(addr.addr, addr.services); - - // # Correctness - // - // Spawn address book accesses on a blocking thread, - // to avoid deadlocks (see #1976). - let address_book = self.address_book.clone(); - let span = Span::current(); - tokio::task::spawn_blocking(move || { - span.in_scope(|| address_book.lock().unwrap().update(addr)) - }) - .await - .expect("panic in peer failure address book update task"); + /// Returns the address book for this `CandidateSet`. 
+ pub async fn address_book(&self) -> Arc> { + self.address_book.clone() } } diff --git a/zebra-network/src/peer_set/candidate_set/tests/prop.rs b/zebra-network/src/peer_set/candidate_set/tests/prop.rs index 394e35df6c3..e5201e046ba 100644 --- a/zebra-network/src/peer_set/candidate_set/tests/prop.rs +++ b/zebra-network/src/peer_set/candidate_set/tests/prop.rs @@ -8,15 +8,20 @@ use std::{ time::{Duration, Instant}, }; -use proptest::{collection::vec, prelude::*}; +use proptest::{ + collection::{hash_map, vec}, + prelude::*, +}; use tokio::time::{sleep, timeout}; use tracing::Span; use zebra_chain::{parameters::Network::*, serialization::DateTime32}; use crate::{ - constants::MIN_OUTBOUND_PEER_CONNECTION_INTERVAL, + canonical_peer_addr, + constants::{DEFAULT_MAX_CONNS_PER_IP, MIN_OUTBOUND_PEER_CONNECTION_INTERVAL}, meta_addr::{MetaAddr, MetaAddrChange}, + protocol::types::PeerServices, AddressBook, BoxError, Request, Response, }; @@ -67,7 +72,7 @@ proptest! { }); // Since the address book is empty, there won't be any available peers - let address_book = AddressBook::new(SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, Span::none()); + let address_book = AddressBook::new(SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, DEFAULT_MAX_CONNS_PER_IP, Span::none()); let mut candidate_set = CandidateSet::new(Arc::new(std::sync::Mutex::new(address_book)), peer_service); @@ -94,18 +99,22 @@ proptest! { /// Test that new outbound peer connections are rate-limited. #[test] fn new_outbound_peer_connections_are_rate_limited( - peers in vec(MetaAddrChange::ready_outbound_strategy(), TEST_ADDRESSES), + peers in hash_map(MetaAddrChange::ready_outbound_strategy_ip(), MetaAddrChange::ready_outbound_strategy_port(), TEST_ADDRESSES), initial_candidates in 0..MAX_TEST_CANDIDATES, extra_candidates in 0..MAX_TEST_CANDIDATES, ) { let (runtime, _init_guard) = zebra_test::init_async(); let _guard = runtime.enter(); + let peers = peers.into_iter().map(|(ip, port)| { + MetaAddr::new_alternate(canonical_peer_addr(SocketAddr::new(ip, port)), &PeerServices::NODE_NETWORK) + }).collect::>(); + let peer_service = tower::service_fn(|_| async { unreachable!("Mock peer service is never used"); }); - let mut address_book = AddressBook::new(SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, Span::none()); + let mut address_book = AddressBook::new(SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, DEFAULT_MAX_CONNS_PER_IP, Span::none()); address_book.extend(peers); let mut candidate_set = CandidateSet::new(Arc::new(std::sync::Mutex::new(address_book)), peer_service); @@ -139,7 +148,7 @@ proptest! { /// - if no reconnection peer is returned at all. 
async fn check_candidates_rate_limiting(candidate_set: &mut CandidateSet, candidates: u32) where - S: tower::Service, + S: tower::Service + Send, S::Future: Send + 'static, { let mut now = Instant::now(); diff --git a/zebra-network/src/peer_set/candidate_set/tests/vectors.rs b/zebra-network/src/peer_set/candidate_set/tests/vectors.rs index 71811a75962..261fb2ff487 100644 --- a/zebra-network/src/peer_set/candidate_set/tests/vectors.rs +++ b/zebra-network/src/peer_set/candidate_set/tests/vectors.rs @@ -15,7 +15,7 @@ use zebra_chain::{parameters::Network::*, serialization::DateTime32}; use zebra_test::mock_service::{MockService, PanicAssertion}; use crate::{ - constants::{GET_ADDR_FANOUT, MIN_PEER_GET_ADDR_INTERVAL}, + constants::{DEFAULT_MAX_CONNS_PER_IP, GET_ADDR_FANOUT, MIN_PEER_GET_ADDR_INTERVAL}, types::{MetaAddr, PeerServices}, AddressBook, Request, Response, }; @@ -141,6 +141,7 @@ fn candidate_set_updates_are_rate_limited() { let address_book = AddressBook::new( SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, Span::none(), ); let mut peer_service = MockService::build().for_unit_tests(); @@ -186,6 +187,7 @@ fn candidate_set_update_after_update_initial_is_rate_limited() { let address_book = AddressBook::new( SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, Span::none(), ); let mut peer_service = MockService::build().for_unit_tests(); diff --git a/zebra-network/src/peer_set/initialize.rs b/zebra-network/src/peer_set/initialize.rs index f9b002f941c..6919b2ad09c 100644 --- a/zebra-network/src/peer_set/initialize.rs +++ b/zebra-network/src/peer_set/initialize.rs @@ -1,11 +1,14 @@ //! A peer set whose size is dynamically determined by resource constraints. - -// Portions of this submodule were adapted from tower-balance, -// which is (c) 2019 Tower Contributors (MIT licensed). +//! +//! The [`PeerSet`] implementation is adapted from the one in [tower::Balance][tower-balance]. +//! +//! [tower-balance]: https://github.com/tower-rs/tower/tree/master/tower/src/balance use std::{ collections::{BTreeMap, HashSet}, + convert::Infallible, net::SocketAddr, + pin::Pin, sync::Arc, time::Duration, }; @@ -13,8 +16,8 @@ use std::{ use futures::{ future::{self, FutureExt}, sink::SinkExt, - stream::{FuturesUnordered, StreamExt, TryStreamExt}, - TryFutureExt, + stream::{FuturesUnordered, StreamExt}, + Future, TryFutureExt, }; use rand::seq::SliceRandom; use tokio::{ @@ -26,9 +29,10 @@ use tokio_stream::wrappers::IntervalStream; use tower::{ buffer::Buffer, discover::Change, layer::Layer, util::BoxService, Service, ServiceExt, }; +use tracing::Span; use tracing_futures::Instrument; -use zebra_chain::chain_tip::ChainTip; +use zebra_chain::{chain_tip::ChainTip, diagnostic::task::WaitForPanics}; use crate::{ address_book_updater::AddressBookUpdater, @@ -38,6 +42,7 @@ use crate::{ self, address_is_valid_for_inbound_listeners, HandshakeRequest, MinimumPeerVersion, OutboundConnectorRequest, PeerPreference, }, + peer_cache_updater::peer_cache_updater, peer_set::{set::MorePeers, ActiveConnectionCounter, CandidateSet, ConnectionTracker, PeerSet}, AddressBook, BoxError, Config, PeerSocketAddr, Request, Response, }; @@ -45,11 +50,17 @@ use crate::{ #[cfg(test)] mod tests; -/// The result of an outbound peer connection attempt or inbound connection -/// handshake. +mod recent_by_ip; + +/// A successful outbound peer connection attempt or inbound connection handshake. /// -/// This result comes from the `Handshaker`. 
-type DiscoveredPeer = Result<(PeerSocketAddr, peer::Client), BoxError>; +/// The [`Handshake`](peer::Handshake) service returns a [`Result`]. Only successful connections +/// should be sent on the channel. Errors should be logged or ignored. +/// +/// We don't allow any errors in this type, because: +/// - The connection limits don't include failed connections +/// - tower::Discover interprets an error as stream termination +type DiscoveredPeer = (PeerSocketAddr, peer::Client); /// Initialize a peer set, using a network `config`, `inbound_service`, /// and `latest_chain_tip`. @@ -93,17 +104,10 @@ pub async fn init( Arc>, ) where - S: Service + Clone + Send + 'static, + S: Service + Clone + Send + Sync + 'static, S::Future: Send + 'static, - C: ChainTip + Clone + Send + 'static, + C: ChainTip + Clone + Send + Sync + 'static, { - // If we want Zebra to operate with no network, - // we should implement a `zebrad` command that doesn't use `zebra-network`. - assert!( - config.peerset_initial_target_size > 0, - "Zebra must be allowed to connect to at least one peer" - ); - let (tcp_listener, listen_addr) = open_listener(&config.clone()).await; let (address_book, address_book_updater, address_metrics, address_book_updater_guard) = @@ -145,14 +149,15 @@ where // Create an mpsc channel for peer changes, // based on the maximum number of inbound and outbound peers. + // + // The connection limit does not apply to errors, + // so they need to be handled before sending to this channel. let (peerset_tx, peerset_rx) = futures::channel::mpsc::channel::(config.peerset_total_connection_limit()); - let discovered_peers = peerset_rx - // Discover interprets an error as stream termination, - // so discard any errored connections... - .filter(|result| future::ready(result.is_ok())) - .map_ok(|(address, client)| Change::Insert(address, client.into())); + let discovered_peers = peerset_rx.map(|(address, client)| { + Result::<_, Infallible>::Ok(Change::Insert(address, client.into())) + }); // Create an mpsc channel for peerset demand signaling, // based on the maximum number of outbound peers. @@ -171,6 +176,7 @@ where inv_receiver, address_metrics, MinimumPeerVersion::new(latest_chain_tip, config.network), + None, ); let peer_set = Buffer::new(BoxService::new(peer_set), constants::PEERSET_BUFFER_SIZE); @@ -186,7 +192,7 @@ where ); let listen_guard = tokio::spawn(listen_fut.in_current_span()); - // 2. Initial peers, specified in the config. + // 2. Initial peers, specified in the config and cached on disk. let initial_peers_fut = add_initial_peers( config.clone(), outbound_connector.clone(), @@ -200,8 +206,8 @@ where // Wait for the initial seed peer count let mut active_outbound_connections = initial_peers_join + .wait_for_panics() .await - .expect("unexpected panic in spawned initial peers task") .expect("unexpected error connecting to initial peers"); let active_initial_peer_count = active_outbound_connections.update_count(); @@ -209,6 +215,9 @@ where // because zcashd rate-limits `addr`/`addrv2` messages per connection, // and if we only have one initial peer, // we need to ensure that its `Response::Addr` is used by the crawler. + // + // TODO: this might not be needed after we added the Connection peer address cache, + // try removing it in a future release? 
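The infallible `DiscoveredPeer` type above leans on a `tower::discover` contract worth spelling out: a single `Err` item terminates the whole discovery stream, and with it the peer set. Mapping a channel of successes into `Change::Insert` values with an `Infallible` error type makes that termination impossible by construction. A sketch with a placeholder `FakeClient` standing in for `peer::Client`:

```rust
use std::{convert::Infallible, net::SocketAddr};

use futures::{channel::mpsc, Stream, StreamExt};
use tower::discover::Change;

/// A placeholder for `peer::Client` in this editorial sketch.
struct FakeClient;

/// Only successful handshakes are ever sent on `peerset_rx`, so every
/// discovery item is `Ok`, and the `Infallible` error type documents that
/// a failed handshake can never terminate the peer set's discovery stream.
fn discovered_peers(
    peerset_rx: mpsc::Receiver<(SocketAddr, FakeClient)>,
) -> impl Stream<Item = Result<Change<SocketAddr, FakeClient>, Infallible>> {
    peerset_rx.map(|(addr, client)| Ok(Change::Insert(addr, client)))
}
```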
info!( ?active_initial_peer_count, "sending initial request for peers" @@ -224,8 +233,9 @@ where let _ = demand_tx.try_send(MorePeers); } + // Start the peer crawler let crawl_fut = crawl_and_dial( - config, + config.clone(), demand_tx, demand_rx, candidates, @@ -235,15 +245,24 @@ where ); let crawl_guard = tokio::spawn(crawl_fut.in_current_span()); + // Start the peer disk cache updater + let peer_cache_updater_fut = peer_cache_updater(config, address_book.clone()); + let peer_cache_updater_guard = tokio::spawn(peer_cache_updater_fut.in_current_span()); + handle_tx - .send(vec![listen_guard, crawl_guard, address_book_updater_guard]) + .send(vec![ + listen_guard, + crawl_guard, + address_book_updater_guard, + peer_cache_updater_guard, + ]) .unwrap(); (peer_set, address_book) } -/// Use the provided `outbound_connector` to connect to the configured initial peers, -/// then send the resulting peer connections over `peerset_tx`. +/// Use the provided `outbound_connector` to connect to the configured DNS seeder and +/// disk cache initial peers, then send the resulting peer connections over `peerset_tx`. /// /// Also sends every initial peer address to the `address_book_updater`. #[instrument(skip(config, outbound_connector, peerset_tx, address_book_updater))] @@ -273,9 +292,12 @@ where "Outbound Connections", ); + // TODO: update when we add Tor peers or other kinds of addresses. + let ipv4_peer_count = initial_peers.iter().filter(|ip| ip.is_ipv4()).count(); + let ipv6_peer_count = initial_peers.iter().filter(|ip| ip.is_ipv6()).count(); info!( - initial_peer_count = ?initial_peers.len(), - ?initial_peers, + ?ipv4_peer_count, + ?ipv6_peer_count, "connecting to initial peer set" ); @@ -321,14 +343,13 @@ where } .in_current_span(), ) + .wait_for_panics() }) .collect(); while let Some(handshake_result) = handshakes.next().await { - let handshake_result = - handshake_result.expect("unexpected panic in initial peer handshake"); match handshake_result { - Ok(ref change) => { + Ok(change) => { handshake_success_total += 1; debug!( ?handshake_success_total, @@ -336,6 +357,9 @@ where ?change, "an initial peer handshake succeeded" ); + + // The connection limit makes sure this send doesn't block + peerset_tx.send(change).await?; } Err((addr, ref e)) => { handshake_error_total += 1; @@ -370,10 +394,6 @@ where } } - peerset_tx - .send(handshake_result.map_err(|(_addr, e)| e)) - .await?; - // Security: Let other tasks run after each connection is processed. // // Avoids remote peers starving other Zebra tasks using initial connection successes or errors. @@ -385,7 +405,7 @@ where ?handshake_success_total, ?handshake_error_total, ?outbound_connections, - "finished connecting to initial seed peers" + "finished connecting to initial seed and disk cache peers" ); Ok(active_outbound_connections) @@ -423,16 +443,22 @@ async fn limit_initial_peers( .entry(preference) .or_default() .push(peer_addr), - Err(error) => warn!( + Err(error) => info!( ?peer_addr, ?error, - "invalid initial peer from DNS seeder or configured IP address", + "invalid initial peer from DNS seeder, configured IP address, or disk cache", ), } } // Send every initial peer to the address book, in preferred order. // (This treats initial peers the same way we treat gossiped peers.) 
+    //
+    // # Security
+    //
+    // Initial peers are limited because:
+    // - the number of initial peers is limited
+    // - this code only runs once at startup
     for peer in preferred_peers.values().flatten() {
         let peer_addr = MetaAddr::new_initial_peer(*peer);
         // `send` only waits when the channel is full.
@@ -522,7 +548,7 @@ async fn accept_inbound_connections<S>(
     config: Config,
     listener: TcpListener,
     min_inbound_peer_connection_interval: Duration,
-    mut handshaker: S,
+    handshaker: S,
     peerset_tx: futures::channel::mpsc::Sender<DiscoveredPeer>,
 ) -> Result<(), BoxError>
 where
@@ -530,12 +556,16 @@ where
         + Clone,
     S::Future: Send + 'static,
 {
+    let mut recent_inbound_connections =
+        recent_by_ip::RecentByIp::new(None, Some(config.max_connections_per_ip));
+
     let mut active_inbound_connections = ActiveConnectionCounter::new_counter_with(
         config.peerset_inbound_connection_limit(),
         "Inbound Connections",
     );

-    let mut handshakes = FuturesUnordered::new();
+    let mut handshakes: FuturesUnordered<Pin<Box<dyn Future<Output = ()> + Send>>> =
+        FuturesUnordered::new();
     // Keeping an unresolved future in the pool means the stream never terminates.
     handshakes.push(future::pending().boxed());

@@ -545,11 +575,11 @@ where
             biased;
             next_handshake_res = handshakes.next() => match next_handshake_res {
                 // The task has already sent the peer change to the peer set.
-                Some(Ok(_)) => continue,
-                Some(Err(task_panic)) => panic!("panic in inbound handshake task: {task_panic:?}"),
+                Some(()) => continue,
                 None => unreachable!("handshakes never terminates, because it contains a future that never resolves"),
             },

+            // This future must wait until new connections are available: it can't have a timeout.
             inbound_result = listener.accept() => inbound_result,
         };

@@ -558,10 +588,14 @@ where
         if active_inbound_connections.update_count()
             >= config.peerset_inbound_connection_limit()
+            || recent_inbound_connections.is_past_limit_or_add(addr.ip())
         {
             // Too many open inbound connections or pending handshakes already.
             // Close the connection.
             std::mem::drop(tcp_stream);
+            // Allow invalid connections to be cleared quickly,
+            // but still put a limit on our CPU and network usage from failed connections.
+            tokio::time::sleep(constants::MIN_INBOUND_PEER_FAILED_CONNECTION_INTERVAL).await;
             continue;
         }

@@ -573,40 +607,17 @@ where
             "handshaking on an open inbound peer connection"
         );

-        let connected_addr = peer::ConnectedAddr::new_inbound_direct(addr);
-        let accept_span = info_span!("listen_accept", peer = ?connected_addr);
-        let _guard = accept_span.enter();
-
-        debug!("got incoming connection");
-        handshaker.ready().await?;
-        // TODO: distinguish between proxied listeners and direct listeners
-        let handshaker_span = info_span!("listen_handshaker", peer = ?connected_addr);
-
-        // Construct a handshake future but do not drive it yet....
-        let handshake = handshaker.call(HandshakeRequest {
-            data_stream: tcp_stream,
-            connected_addr,
+        let handshake_task = accept_inbound_handshake(
+            addr,
+            handshaker.clone(),
+            tcp_stream,
             connection_tracker,
-        });
-        // ... instead, spawn a new task to handle this connection
-        {
-            let mut peerset_tx = peerset_tx.clone();
-
-            let handshake_task = tokio::spawn(
-                async move {
-                    let handshake_result = handshake.await;
-
-                    if let Ok(client) = handshake_result {
-                        let _ = peerset_tx.send(Ok((addr, client))).await;
-                    } else {
-                        debug!(?handshake_result, "error handshaking with inbound peer");
-                    }
-                }
-                .instrument(handshaker_span),
-            );
+            peerset_tx.clone(),
+        )
+        .await?
+        .wait_for_panics();

-            handshakes.push(Box::pin(handshake_task));
-        }
+        handshakes.push(handshake_task);

         // Rate-limit inbound connection handshakes.
         // But sleep longer after a successful connection,
@@ -636,24 +647,80 @@ where
     }
 }

+/// Set up a new inbound connection as a Zcash peer.
+///
+/// Uses `handshaker` to perform a Zcash network protocol handshake, and sends
+/// the [`peer::Client`] result over `peerset_tx`.
+//
+// TODO: when we support inbound proxies, distinguish between proxied listeners and
+// direct listeners in the span generated by this instrument macro
+#[instrument(skip(handshaker, tcp_stream, connection_tracker, peerset_tx))]
+async fn accept_inbound_handshake<S>(
+    addr: PeerSocketAddr,
+    mut handshaker: S,
+    tcp_stream: TcpStream,
+    connection_tracker: ConnectionTracker,
+    peerset_tx: futures::channel::mpsc::Sender<DiscoveredPeer>,
+) -> Result<tokio::task::JoinHandle<()>, BoxError>
+where
+    S: Service<HandshakeRequest<TcpStream>, Response = peer::Client, Error = BoxError>
+        + Clone,
+    S::Future: Send + 'static,
+{
+    let connected_addr = peer::ConnectedAddr::new_inbound_direct(addr);
+
+    debug!("got incoming connection");
+
+    // # Correctness
+    //
+    // Holding the drop guard returned by Span::enter across .await points will
+    // result in incorrect traces if it yields.
+    //
+    // This await is okay because the handshaker's `poll_ready` method always returns Ready.
+    handshaker.ready().await?;
+
+    // Construct a handshake future but do not drive it yet....
+    let handshake = handshaker.call(HandshakeRequest {
+        data_stream: tcp_stream,
+        connected_addr,
+        connection_tracker,
+    });
+    // ... instead, spawn a new task to handle this connection
+    let mut peerset_tx = peerset_tx.clone();
+
+    let handshake_task = tokio::spawn(
+        async move {
+            let handshake_result = handshake.await;
+
+            if let Ok(client) = handshake_result {
+                // The connection limit makes sure this send doesn't block
+                let _ = peerset_tx.send((addr, client)).await;
+            } else {
+                debug!(?handshake_result, "error handshaking with inbound peer");
+            }
+        }
+        .in_current_span(),
+    );
+
+    Ok(handshake_task)
+}
+
 /// An action that the peer crawler can take.
 enum CrawlerAction {
     /// Drop the demand signal because there are too many pending handshakes.
     DemandDrop,
-    /// Initiate a handshake to `candidate` in response to demand.
-    DemandHandshake { candidate: MetaAddr },
-    /// Crawl existing peers for more peers in response to demand, because there
-    /// are no available candidates.
-    DemandCrawl,
+    /// Initiate a handshake to the next candidate peer in response to demand.
+    ///
+    /// If there are no available candidates, crawl existing peers.
+    DemandHandshakeOrCrawl,
     /// Crawl existing peers for more peers in response to a timer `tick`.
     TimerCrawl { tick: Instant },
-    /// Handle a successfully connected handshake `peer_set_change`.
-    HandshakeConnected {
-        address: PeerSocketAddr,
-        client: peer::Client,
-    },
-    /// Handle a handshake failure to `failed_addr`.
-    HandshakeFailed { failed_addr: MetaAddr },
+    /// Clear a finished handshake.
+    HandshakeFinished,
+    /// Clear a finished demand crawl (DemandHandshakeOrCrawl with no peers).
+    DemandCrawlFinished,
+    /// Clear a finished TimerCrawl.
+    TimerCrawlFinished,
 }

 /// Given a channel `demand_rx` that signals a need for new peers, try to find
@@ -689,11 +756,11 @@ enum CrawlerAction
     ,
 )]
 async fn crawl_and_dial<C, S>(
     config: Config,
-    mut demand_tx: futures::channel::mpsc::Sender<MorePeers>,
+    demand_tx: futures::channel::mpsc::Sender<MorePeers>,
     mut demand_rx: futures::channel::mpsc::Receiver<MorePeers>,
-    mut candidates: CandidateSet<S>,
+    candidates: CandidateSet<S>,
     outbound_connector: C,
-    mut peerset_tx: futures::channel::mpsc::Sender<DiscoveredPeer>,
+    peerset_tx: futures::channel::mpsc::Sender<DiscoveredPeer>,
     mut active_outbound_connections: ActiveConnectionCounter,
 ) -> Result<(), BoxError>
 where
@@ -705,31 +772,32 @@ where
         + Send
         + 'static,
     C::Future: Send + 'static,
-    S: Service<Request, Response = Response, Error = BoxError>,
+    S: Service<Request, Response = Response, Error = BoxError> + Send + Sync + 'static,
     S::Future: Send + 'static,
 {
     use CrawlerAction::*;

-    // CORRECTNESS
-    //
-    // To avoid hangs and starvation, the crawler must:
-    // - spawn a separate task for each crawl and handshake, so they can make
-    //   progress independently (and avoid deadlocking each other)
-    // - use the `select!` macro for all actions, because the `select` function
-    //   is biased towards the first ready future
-
     info!(
         crawl_new_peer_interval = ?config.crawl_new_peer_interval,
         outbound_connections = ?active_outbound_connections.update_count(),
         "starting the peer address crawler",
     );

-    let mut handshakes = FuturesUnordered::new();
+    let address_book = candidates.address_book().await;
+
+    // # Concurrency
+    //
+    // Allow tasks using the candidate set to be spawned, so they can run concurrently.
+    // Previously, Zebra has had deadlocks and long hangs caused by running dependent
+    // candidate set futures in the same async task.
+    let candidates = Arc::new(futures::lock::Mutex::new(candidates));
+
+    // This contains both crawl and handshake tasks.
+    let mut handshakes: FuturesUnordered<
+        Pin<Box<dyn Future<Output = Result<CrawlerAction, BoxError>> + Send>>,
+    > = FuturesUnordered::new();
     // <FuturesUnordered as Stream> returns None when empty.
-    // Keeping an unresolved future in the pool means the stream
-    // never terminates.
-    // We could use StreamExt::select_next_some and StreamExt::fuse, but `fuse`
-    // prevents us from adding items to the stream and checking its length.
+    // Keeping an unresolved future in the pool means the stream never terminates.
     handshakes.push(future::pending().boxed());

     let mut crawl_timer = tokio::time::interval(config.crawl_new_peer_interval);
@@ -739,6 +807,10 @@ where
     let mut crawl_timer = IntervalStream::new(crawl_timer).map(|tick| TimerCrawl { tick });

+    // # Concurrency
+    //
+    // To avoid hangs and starvation, the crawler must spawn a separate task for each crawl
+    // and handshake, so they can make progress independently (and avoid deadlocking each other).
     loop {
         metrics::gauge!(
             "crawler.in_flight_handshakes",
@@ -749,33 +821,45 @@ where
         );

         let crawler_action = tokio::select! {
+            biased;
+            // Check for completed handshakes first, because the rest of the app needs them.
+            // Pending handshakes are limited by the connection limit.
             next_handshake_res = handshakes.next() => next_handshake_res.expect(
                 "handshakes never terminates, because it contains a future that never resolves"
             ),
-            next_timer = crawl_timer.next() => next_timer.expect("timers never terminate"),
-            // turn the demand into an action, based on the crawler's current state
-            _ = demand_rx.next() => {
+            // The timer is rate-limited
+            next_timer = crawl_timer.next() => Ok(next_timer.expect("timers never terminate")),
+            // Turn any new demand into an action, based on the crawler's current state.
+            //
+            // # Concurrency
+            //
+            // Demand is potentially unlimited, so it must go last in a biased select!.
+            next_demand = demand_rx.next() => next_demand.ok_or("demand stream closed, is Zebra shutting down?".into()).map(|MorePeers|{
                 if active_outbound_connections.update_count() >= config.peerset_outbound_connection_limit() {
                     // Too many open outbound connections or pending handshakes already
                     DemandDrop
-                } else if let Some(candidate) = candidates.next().await {
-                    // candidates.next has a short delay, and briefly holds the address
-                    // book lock, so it shouldn't hang
-                    DemandHandshake { candidate }
                 } else {
-                    DemandCrawl
+                    DemandHandshakeOrCrawl
                 }
-            }
+            })
         };

         match crawler_action {
-            DemandDrop => {
+            // Dummy actions
+            Ok(DemandDrop) => {
                 // This is set to trace level because when the peerset is
-                // congested it can generate a lot of demand signal very
-                // rapidly.
+                // congested it can generate a lot of demand signal very rapidly.
                 trace!("too many open connections or in-flight handshakes, dropping demand signal");
             }
-            DemandHandshake { candidate } => {
+
+            // Spawned tasks
+            Ok(DemandHandshakeOrCrawl) => {
+                let candidates = candidates.clone();
+                let outbound_connector = outbound_connector.clone();
+                let peerset_tx = peerset_tx.clone();
+                let address_book = address_book.clone();
+                let demand_tx = demand_tx.clone();
+
                 // Increment the connection count before we spawn the connection.
                 let outbound_connection_tracker = active_outbound_connections.track_connection();
                 debug!(
@@ -783,74 +867,85 @@ where
                     "opening an outbound peer connection"
                 );

-                // Spawn each handshake into an independent task, so it can make
-                // progress independently of the crawls.
-                let hs_join = tokio::spawn(dial(
-                    candidate,
-                    outbound_connector.clone(),
-                    outbound_connection_tracker,
-                ))
-                .map(move |res| match res {
-                    Ok(crawler_action) => crawler_action,
-                    Err(e) => {
-                        panic!("panic during handshaking with {candidate:?}: {e:?} ");
+                // Spawn each handshake or crawl into an independent task, so handshakes can make
+                // progress while crawls are running.
+                let handshake_or_crawl_handle = tokio::spawn(
+                    async move {
+                        // Try to get the next available peer for a handshake.
+                        //
+                        // candidates.next() has a short timeout, and briefly holds the address
+                        // book lock, so it shouldn't hang.
+                        //
+                        // Hold the lock for as short a time as possible.
+                        let candidate = { candidates.lock().await.next().await };
+
+                        if let Some(candidate) = candidate {
+                            // we don't need to spawn here, because there's nothing running concurrently
+                            dial(
+                                candidate,
+                                outbound_connector,
+                                outbound_connection_tracker,
+                                peerset_tx,
+                                address_book,
+                                demand_tx,
+                            )
+                            .await?;
+
+                            Ok(HandshakeFinished)
+                        } else {
+                            // There weren't any peers, so try to get more peers.
+                            debug!("demand for peers but no available candidates");
+
+                            crawl(candidates, demand_tx).await?;
+
+                            Ok(DemandCrawlFinished)
+                        }
                     }
-                })
-                .in_current_span();
+                    .in_current_span(),
+                )
+                .wait_for_panics();

-                handshakes.push(Box::pin(hs_join));
+                handshakes.push(handshake_or_crawl_handle);
             }
-            DemandCrawl => {
-                debug!("demand for peers but no available candidates");
-                // update has timeouts, and briefly holds the address book
-                // lock, so it shouldn't hang
-                //
-                // TODO: refactor candidates into a buffered service, so we can
-                // spawn independent tasks to avoid deadlocks
-                let more_peers = candidates.update().await?;
-
-                // If we got more peers, try to connect to a new peer.
-                //
-                // # Security
-                //
-                // Update attempts are rate-limited by the candidate set.
-                //
-                // We only try peers if there was actually an update.
-                // So if all peers have had a recent attempt,
-                // and there was recent update with no peers,
-                // the channel will drain.
-                // This prevents useless update attempt loops.
-                if let Some(more_peers) = more_peers {
-                    let _ = demand_tx.try_send(more_peers);
-                }
+            Ok(TimerCrawl { tick }) => {
+                let candidates = candidates.clone();
+                let demand_tx = demand_tx.clone();
+
+                let crawl_handle = tokio::spawn(
+                    async move {
+                        debug!(
+                            ?tick,
+                            "crawling for more peers in response to the crawl timer"
+                        );
+
+                        crawl(candidates, demand_tx).await?;
+
+                        Ok(TimerCrawlFinished)
+                    }
+                    .in_current_span(),
+                )
+                .wait_for_panics();
+
+                handshakes.push(crawl_handle);
             }
-            TimerCrawl { tick } => {
-                debug!(
-                    ?tick,
-                    "crawling for more peers in response to the crawl timer"
-                );
-                // TODO: spawn independent tasks to avoid deadlocks
-                candidates.update().await?;
-                // Try to connect to a new peer.
-                let _ = demand_tx.try_send(MorePeers);
+
+            // Completed spawned tasks
+            Ok(HandshakeFinished) => {
+                // Already logged in dial()
             }
-            HandshakeConnected { address, client } => {
-                debug!(candidate.addr = ?address, "successfully dialed new peer");
-                // successes are handled by an independent task, except for `candidates.update` in
-                // this task, which has a timeout, so they shouldn't hang
-                peerset_tx.send(Ok((address, client))).await?;
+            Ok(DemandCrawlFinished) => {
+                // This is set to trace level because when the peerset is
+                // congested it can generate a lot of demand signal very rapidly.
+                trace!("demand-based crawl finished");
+            }
+            Ok(TimerCrawlFinished) => {
+                debug!("timer-based crawl finished");
             }
-            HandshakeFailed { failed_addr } => {
-                // The connection was never opened, or it failed the handshake and was dropped.
-
-                debug!(?failed_addr.addr, "marking candidate as failed");
-                candidates.report_failed(&failed_addr).await;
-                // The demand signal that was taken out of the queue
-                // to attempt to connect to the failed candidate never
-                // turned into a connection, so add it back:
-                //
-                // Security: handshake failures are rate-limited by peer attempt timeouts.
-                let _ = demand_tx.try_send(MorePeers);
+
+            // Fatal errors and shutdowns
+            Err(error) => {
+                info!(?error, "crawler task exiting due to an error");
+                return Err(error);
             }
         }

@@ -861,17 +956,79 @@ where
     }
 }

+/// Try to get more peers using `candidates`, then queue a connection attempt using `demand_tx`.
+/// If there were no new peers, the connection attempt is skipped.
+#[instrument(skip(candidates, demand_tx))]
+async fn crawl<S>(
+    candidates: Arc<futures::lock::Mutex<CandidateSet<S>>>,
+    mut demand_tx: futures::channel::mpsc::Sender<MorePeers>,
+) -> Result<(), BoxError>
+where
+    S: Service<Request, Response = Response, Error = BoxError> + Send + Sync + 'static,
+    S::Future: Send + 'static,
+{
+    // update() has timeouts, and briefly holds the address book
+    // lock, so it shouldn't hang.
+    // Try to get new peers, holding the lock for as short a time as possible.
+    let result = {
+        let result = candidates.lock().await.update().await;
+        std::mem::drop(candidates);
+        result
+    };
+    let more_peers = match result {
+        Ok(more_peers) => more_peers,
+        Err(e) => {
+            info!(
+                ?e,
+                "candidate set returned an error, is Zebra shutting down?"
+            );
+            return Err(e);
+        }
+    };
+
+    // If we got more peers, try to connect to a new peer on our next loop.
+    //
+    // # Security
+    //
+    // Update attempts are rate-limited by the candidate set,
+    // and we only try peers if there was actually an update.
+    //
+    // So if all peers have had a recent attempt, and there was recent update
+    // with no peers, the channel will drain. This prevents useless update attempt
+    // loops.
+    if let Some(more_peers) = more_peers {
+        if let Err(send_error) = demand_tx.try_send(more_peers) {
+            if send_error.is_disconnected() {
+                // Zebra is shutting down
+                return Err(send_error.into());
+            }
+        }
+    }
+
+    Ok(())
+}
+
 /// Try to connect to `candidate` using `outbound_connector`.
 /// Uses `outbound_connection_tracker` to track the active connection count.
 ///
-/// Returns a `HandshakeConnected` action on success, and a
-/// `HandshakeFailed` action on error.
-#[instrument(skip(outbound_connector, outbound_connection_tracker))]
+/// On success, sends peers to `peerset_tx`.
+/// On failure, marks the peer as failed in the address book,
+/// then re-adds demand to `demand_tx`.
+#[instrument(skip(
+    outbound_connector,
+    outbound_connection_tracker,
+    peerset_tx,
+    address_book,
+    demand_tx
+))]
 async fn dial<C>(
     candidate: MetaAddr,
     mut outbound_connector: C,
     outbound_connection_tracker: ConnectionTracker,
-) -> CrawlerAction
+    mut peerset_tx: futures::channel::mpsc::Sender<DiscoveredPeer>,
+    address_book: Arc<std::sync::Mutex<AddressBook>>,
+    mut demand_tx: futures::channel::mpsc::Sender<MorePeers>,
+) -> Result<(), BoxError>
 where
     C: Service<
             OutboundConnectorRequest,
@@ -882,7 +1039,7 @@ where
         + 'static,
     C::Future: Send + 'static,
 {
-    // CORRECTNESS
+    // # Correctness
     //
     // To avoid hangs, the dialer must only await:
     // - functions that return immediately, or
@@ -891,10 +1048,7 @@ where
     debug!(?candidate.addr, "attempting outbound connection in response to demand");

     // the connector is always ready, so this can't hang
-    let outbound_connector = outbound_connector
-        .ready()
-        .await
-        .expect("outbound connector never errors");
+    let outbound_connector = outbound_connector.ready().await?;

     let req = OutboundConnectorRequest {
         addr: candidate.addr,
@@ -902,24 +1056,57 @@ where
     };

     // the handshake has timeouts, so it shouldn't hang
-    outbound_connector
-        .call(req)
-        .map_err(|e| (candidate, e))
-        .map(Into::into)
-        .await
-}
+    let handshake_result = outbound_connector.call(req).map(Into::into).await;
+
+    match handshake_result {
+        Ok((address, client)) => {
+            debug!(?candidate.addr, "successfully dialed new peer");

-impl From<Result<(PeerSocketAddr, peer::Client), (MetaAddr, BoxError)>> for CrawlerAction {
-    fn from(dial_result: Result<(PeerSocketAddr, peer::Client), (MetaAddr, BoxError)>) -> Self {
-        use CrawlerAction::*;
-        match dial_result {
-            Ok((address, client)) => HandshakeConnected { address, client },
-            Err((candidate, e)) => {
-                debug!(?candidate.addr, ?e, "failed to connect to candidate");
-                HandshakeFailed {
-                    failed_addr: candidate,
+            // The connection limit makes sure this send doesn't block.
+            peerset_tx.send((address, client)).await?;
+        }
+        // The connection was never opened, or it failed the handshake and was dropped.
+        Err(error) => {
+            debug!(?error, ?candidate.addr, "failed to make outbound connection to peer");
+            report_failed(address_book.clone(), candidate).await;
+
+            // The demand signal that was taken out of the queue to attempt to connect to the
+            // failed candidate never turned into a connection, so add it back.
+            //
+            // # Security
+            //
+            // Handshake failures are rate-limited by peer attempt timeouts.
+            if let Err(send_error) = demand_tx.try_send(MorePeers) {
+                if send_error.is_disconnected() {
+                    // Zebra is shutting down
+                    return Err(send_error.into());
                 }
             }
         }
     }
+
+    Ok(())
+}
+
+/// Mark `addr` as a failed peer in `address_book`.
+#[instrument(skip(address_book))]
+async fn report_failed(address_book: Arc<std::sync::Mutex<AddressBook>>, addr: MetaAddr) {
+    let addr = MetaAddr::new_errored(addr.addr, addr.services);
+
+    // # Correctness
+    //
+    // Spawn address book accesses on a blocking thread, to avoid deadlocks (see #1976).
+    let span = Span::current();
+    let updated_addr = tokio::task::spawn_blocking(move || {
+        span.in_scope(|| address_book.lock().unwrap().update(addr))
+    })
+    .wait_for_panics()
+    .await;
+
+    assert_eq!(
+        updated_addr.map(|addr| addr.addr()),
+        Some(addr.addr()),
+        "incorrect address updated by address book: \
+         original: {addr:?}, updated: {updated_addr:?}"
+    );
+}
diff --git a/zebra-network/src/peer_set/initialize/recent_by_ip.rs b/zebra-network/src/peer_set/initialize/recent_by_ip.rs
new file mode 100644
index 00000000000..b2fcf7502ff
--- /dev/null
+++ b/zebra-network/src/peer_set/initialize/recent_by_ip.rs
@@ -0,0 +1,94 @@
+//! A set of IPs from recent connection attempts.
+
+use std::{
+    collections::{HashMap, VecDeque},
+    net::IpAddr,
+    time::{Duration, Instant},
+};
+
+use crate::constants;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Debug)]
+/// Stores IPs of recently attempted inbound connections.
+pub struct RecentByIp {
+    /// The list of IPs in decreasing connection age order.
+    pub by_time: VecDeque<(IpAddr, Instant)>,
+
+    /// Stores IPs for recently attempted inbound connections.
+    pub by_ip: HashMap<IpAddr, usize>,
+
+    /// The maximum number of peer connections Zebra will keep for a given IP address
+    /// before it drops any additional peer connections with that IP.
+    pub max_connections_per_ip: usize,
+
+    /// The duration to wait after an entry is added before removing it.
+    pub time_limit: Duration,
+}
+
+impl Default for RecentByIp {
+    fn default() -> Self {
+        Self::new(None, None)
+    }
+}
+
+impl RecentByIp {
+    /// Creates a new [`RecentByIp`]
+    pub fn new(time_limit: Option<Duration>, max_connections_per_ip: Option<usize>) -> Self {
+        let (by_time, by_ip) = Default::default();
+        Self {
+            by_time,
+            by_ip,
+            time_limit: time_limit.unwrap_or(constants::MIN_PEER_RECONNECTION_DELAY),
+            max_connections_per_ip: max_connections_per_ip
+                .unwrap_or(constants::DEFAULT_MAX_CONNS_PER_IP),
+        }
+    }
+
+    /// Prunes outdated entries, checks if there's a recently attempted inbound connection with
+    /// this IP, and adds the entry to `by_time`, and `by_ip` if needed.
+    ///
+    /// Returns true if the recently attempted inbound connection count is past the configured limit.
+    pub fn is_past_limit_or_add(&mut self, ip: IpAddr) -> bool {
+        let now = Instant::now();
+        self.prune_by_time(now);
+
+        let count = self.by_ip.entry(ip).or_default();
+        if *count >= self.max_connections_per_ip {
+            true
+        } else {
+            *count += 1;
+            self.by_time.push_back((ip, now));
+            false
+        }
+    }
+
+    /// Prunes entries older than `time_limit`, decrementing or removing their counts in `by_ip`.
+    fn prune_by_time(&mut self, now: Instant) {
+        // Currently saturates to zero:
+        //
+        //
+        // This discards the whole structure if the time limit is very large,
+        // which is unexpected, but stops this list growing without limit.
+        // After the handshake, the peer set will remove any duplicate connections over the limit.
+        let age_limit = now - self.time_limit;
+
+        // `by_time` must be sorted for this to work.
+ let split_off_idx = self.by_time.partition_point(|&(_, time)| time <= age_limit); + + let updated_by_time = self.by_time.split_off(split_off_idx); + + for (ip, _) in &self.by_time { + if let Some(count) = self.by_ip.get_mut(ip) { + *count -= 1; + if *count == 0 { + self.by_ip.remove(ip); + } + } + } + + self.by_time = updated_by_time; + } +} diff --git a/zebra-network/src/peer_set/initialize/recent_by_ip/tests.rs b/zebra-network/src/peer_set/initialize/recent_by_ip/tests.rs new file mode 100644 index 00000000000..e5a589cd3ee --- /dev/null +++ b/zebra-network/src/peer_set/initialize/recent_by_ip/tests.rs @@ -0,0 +1,69 @@ +//! Fixed test vectors for recent IP limits. + +use std::time::Duration; + +use crate::peer_set::initialize::recent_by_ip::RecentByIp; + +#[test] +fn old_connection_attempts_are_pruned() { + const TEST_TIME_LIMIT: Duration = Duration::from_secs(5); + + let _init_guard = zebra_test::init(); + + let mut recent_connections = RecentByIp::new(Some(TEST_TIME_LIMIT), None); + let ip = "127.0.0.1".parse().expect("should parse"); + + assert!( + !recent_connections.is_past_limit_or_add(ip), + "should not be past limit" + ); + assert!( + recent_connections.is_past_limit_or_add(ip), + "should be past max_connections_per_ip limit" + ); + + std::thread::sleep(TEST_TIME_LIMIT / 3); + + assert!( + recent_connections.is_past_limit_or_add(ip), + "should still contain entry after a third of the time limit" + ); + + std::thread::sleep(3 * TEST_TIME_LIMIT / 4); + + assert!( + !recent_connections.is_past_limit_or_add(ip), + "should prune entry after 13/12 * time_limit" + ); + + const TEST_MAX_CONNS_PER_IP: usize = 3; + + let mut recent_connections = + RecentByIp::new(Some(TEST_TIME_LIMIT), Some(TEST_MAX_CONNS_PER_IP)); + + for _ in 0..TEST_MAX_CONNS_PER_IP { + assert!( + !recent_connections.is_past_limit_or_add(ip), + "should not be past limit" + ); + } + + assert!( + recent_connections.is_past_limit_or_add(ip), + "should be past max_connections_per_ip limit" + ); + + std::thread::sleep(TEST_TIME_LIMIT / 3); + + assert!( + recent_connections.is_past_limit_or_add(ip), + "should still be past limit after a third of the reconnection delay" + ); + + std::thread::sleep(3 * TEST_TIME_LIMIT / 4); + + assert!( + !recent_connections.is_past_limit_or_add(ip), + "should prune entry after 13/12 * time_limit" + ); +} diff --git a/zebra-network/src/peer_set/initialize/tests/vectors.rs b/zebra-network/src/peer_set/initialize/tests/vectors.rs index 4ef69cb549d..c871ab43227 100644 --- a/zebra-network/src/peer_set/initialize/tests/vectors.rs +++ b/zebra-network/src/peer_set/initialize/tests/vectors.rs @@ -31,6 +31,7 @@ use zebra_test::net::random_known_port; use crate::{ address_book_updater::AddressBookUpdater, + config::CacheDir, constants, init, meta_addr::{MetaAddr, PeerAddrState}, peer::{self, ClientTestHarness, HandshakeRequest, OutboundConnectorRequest}, @@ -53,6 +54,11 @@ use Network::*; /// Using a very short time can make the crawler not run at all. const CRAWLER_TEST_DURATION: Duration = Duration::from_secs(10); +/// The amount of time to run the peer cache updater task, before testing what it has done. +/// +/// Using a very short time can make the peer cache updater not run at all. +const PEER_CACHE_UPDATER_TEST_DURATION: Duration = Duration::from_secs(25); + /// The amount of time to run the listener, before testing what it has done. /// /// Using a very short time can make the listener not run at all. 
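The `RecentByIp` tracker above backs the new inbound per-IP limit in `accept_inbound_connections`. A minimal usage sketch, assuming only the constructor and method shown in this diff (the `config`, `tcp_stream`, and `addr` values stand in for the listener's real state):

    // One tracker per listener, created at startup.
    let mut recent_inbound_connections =
        RecentByIp::new(None, Some(config.max_connections_per_ip));

    // For each accepted connection: prune old entries, then either record
    // this attempt or reject the connection because the IP is over the limit.
    if recent_inbound_connections.is_past_limit_or_add(addr.ip()) {
        // Too many recent connection attempts from this IP: close the connection.
        std::mem::drop(tcp_stream);
    }
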
@@ -288,6 +294,89 @@ async fn peer_limit_two_testnet() { // Any number of address book peers is valid here, because some peers might have failed. } +/// Test zebra-network writes a peer cache file, and can read it back manually. +#[tokio::test] +async fn written_peer_cache_can_be_read_manually() { + let _init_guard = zebra_test::init(); + + if zebra_test::net::zebra_skip_network_tests() { + return; + } + + let nil_inbound_service = service_fn(|_| async { Ok(Response::Nil) }); + + // The default config should have an active peer cache + let config = Config::default(); + let address_book = + init_with_peer_limit(25, nil_inbound_service, Mainnet, None, config.clone()).await; + + // Let the peer cache updater run for a while. + tokio::time::sleep(PEER_CACHE_UPDATER_TEST_DURATION).await; + + let approximate_peer_count = address_book + .lock() + .expect("previous thread panicked while holding address book lock") + .len(); + if approximate_peer_count > 0 { + let cached_peers = config + .load_peer_cache() + .await + .expect("unexpected error reading peer cache"); + + assert!( + !cached_peers.is_empty(), + "unexpected empty peer cache from manual load: {:?}", + config.cache_dir.peer_cache_file_path(config.network) + ); + } +} + +/// Test zebra-network writes a peer cache file, and reads it back automatically. +#[tokio::test] +async fn written_peer_cache_is_automatically_read_on_startup() { + let _init_guard = zebra_test::init(); + + if zebra_test::net::zebra_skip_network_tests() { + return; + } + + let nil_inbound_service = service_fn(|_| async { Ok(Response::Nil) }); + + // The default config should have an active peer cache + let mut config = Config::default(); + let address_book = + init_with_peer_limit(25, nil_inbound_service, Mainnet, None, config.clone()).await; + + // Let the peer cache updater run for a while. + tokio::time::sleep(PEER_CACHE_UPDATER_TEST_DURATION).await; + + let approximate_peer_count = address_book + .lock() + .expect("previous thread panicked while holding address book lock") + .len(); + if approximate_peer_count > 0 { + // Make sure our only peers are coming from the disk cache + config.initial_mainnet_peers = Default::default(); + + let address_book = + init_with_peer_limit(25, nil_inbound_service, Mainnet, None, config.clone()).await; + + // Let the peer cache reader run and fill the address book. + tokio::time::sleep(CRAWLER_TEST_DURATION).await; + + // We should have loaded at least one peer from the cache + let approximate_cached_peer_count = address_book + .lock() + .expect("previous thread panicked while holding address book lock") + .len(); + assert!( + approximate_cached_peer_count > 0, + "unexpected empty address book using cache from previous instance: {:?}", + config.cache_dir.peer_cache_file_path(config.network) + ); + } +} + /// Test the crawler with an outbound peer limit of zero peers, and a connector that panics. #[tokio::test] async fn crawler_peer_limit_zero_connect_panic() { @@ -370,15 +459,7 @@ async fn crawler_peer_limit_one_connect_ok_then_drop() { let peer_result = peerset_rx.try_next(); match peer_result { // A peer handshake succeeded. - Ok(Some(peer_result)) => { - assert!( - matches!(peer_result, Ok((_, _))), - "unexpected connection error: {peer_result:?}\n\ - {peer_count} previous peers succeeded", - ); - peer_count += 1; - } - + Ok(Some(_peer_change)) => peer_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. 
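The updated tests above no longer assert on the shape of each peer change; they only count successful handshakes. They all share this non-blocking drain pattern over the `futures` mpsc receiver (a sketch, with `peerset_rx` and `peer_count` named as in the tests):

    let mut peer_count: usize = 0;
    loop {
        match peerset_rx.try_next() {
            // A peer handshake succeeded.
            Ok(Some(_peer_change)) => peer_count += 1,
            // The channel is closed and there are no messages left in the channel.
            Ok(None) => break,
            // The channel is still open, but there are no messages left in the channel.
            Err(_) => break,
        }
    }
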
@@ -432,15 +513,7 @@ async fn crawler_peer_limit_one_connect_ok_stay_open() { let peer_change_result = peerset_rx.try_next(); match peer_change_result { // A peer handshake succeeded. - Ok(Some(peer_change_result)) => { - assert!( - matches!(peer_change_result, Ok((_, _))), - "unexpected connection error: {peer_change_result:?}\n\ - {peer_change_count} previous peers succeeded", - ); - peer_change_count += 1; - } - + Ok(Some(_peer_change)) => peer_change_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -542,15 +615,7 @@ async fn crawler_peer_limit_default_connect_ok_then_drop() { let peer_result = peerset_rx.try_next(); match peer_result { // A peer handshake succeeded. - Ok(Some(peer_result)) => { - assert!( - matches!(peer_result, Ok((_, _))), - "unexpected connection error: {peer_result:?}\n\ - {peer_count} previous peers succeeded", - ); - peer_count += 1; - } - + Ok(Some(_peer_change)) => peer_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -605,15 +670,7 @@ async fn crawler_peer_limit_default_connect_ok_stay_open() { let peer_change_result = peerset_rx.try_next(); match peer_change_result { // A peer handshake succeeded. - Ok(Some(peer_change_result)) => { - assert!( - matches!(peer_change_result, Ok((_, _))), - "unexpected connection error: {peer_change_result:?}\n\ - {peer_change_count} previous peers succeeded", - ); - peer_change_count += 1; - } - + Ok(Some(_peer_change)) => peer_change_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -670,7 +727,7 @@ async fn listener_peer_limit_zero_handshake_panic() { }); let (_config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(0, unreachable_inbound_handshaker).await; + spawn_inbound_listener_with_peer_limit(0, None, unreachable_inbound_handshaker).await; let peer_result = peerset_rx.try_next(); assert!( @@ -695,7 +752,7 @@ async fn listener_peer_limit_one_handshake_error() { service_fn(|_| async { Err("test inbound handshaker always returns errors".into()) }); let (_config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(1, error_inbound_handshaker).await; + spawn_inbound_listener_with_peer_limit(1, None, error_inbound_handshaker).await; let peer_result = peerset_rx.try_next(); assert!( @@ -737,23 +794,19 @@ async fn listener_peer_limit_one_handshake_ok_then_drop() { Ok(fake_client) }); - let (config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(1, success_disconnect_inbound_handshaker).await; + let (config, mut peerset_rx) = spawn_inbound_listener_with_peer_limit( + 1, + usize::MAX, + success_disconnect_inbound_handshaker, + ) + .await; let mut peer_count: usize = 0; loop { let peer_result = peerset_rx.try_next(); match peer_result { // A peer handshake succeeded. - Ok(Some(peer_result)) => { - assert!( - matches!(peer_result, Ok((_, _))), - "unexpected connection error: {peer_result:?}\n\ - {peer_count} previous peers succeeded", - ); - peer_count += 1; - } - + Ok(Some(_peer_change)) => peer_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. 
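The test helpers take their limits as `impl Into<Option<usize>>`, which is why call sites can pass a bare `1`, `None`, or `usize::MAX` without wrapping. A hedged sketch of the idiom (`effective_max_conns` is illustrative; the constant is the one used in this diff, in scope inside zebra-network):

    fn effective_max_conns(max_connections_per_ip: impl Into<Option<usize>>) -> usize {
        // `usize` converts into `Option<usize>` via the standard library's blanket
        // `impl From<T> for Option<T>`, so both `effective_max_conns(3)` and
        // `effective_max_conns(None)` compile.
        max_connections_per_ip
            .into()
            .unwrap_or(constants::DEFAULT_MAX_CONNS_PER_IP)
    }
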
@@ -804,22 +857,14 @@ async fn listener_peer_limit_one_handshake_ok_stay_open() { }); let (config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(1, success_stay_open_inbound_handshaker).await; + spawn_inbound_listener_with_peer_limit(1, None, success_stay_open_inbound_handshaker).await; let mut peer_change_count: usize = 0; loop { let peer_change_result = peerset_rx.try_next(); match peer_change_result { // A peer handshake succeeded. - Ok(Some(peer_change_result)) => { - assert!( - matches!(peer_change_result, Ok((_, _))), - "unexpected connection error: {peer_change_result:?}\n\ - {peer_change_count} previous peers succeeded", - ); - peer_change_count += 1; - } - + Ok(Some(_peer_change)) => peer_change_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -876,7 +921,7 @@ async fn listener_peer_limit_default_handshake_error() { service_fn(|_| async { Err("test inbound handshaker always returns errors".into()) }); let (_config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(None, error_inbound_handshaker).await; + spawn_inbound_listener_with_peer_limit(None, None, error_inbound_handshaker).await; let peer_result = peerset_rx.try_next(); assert!( @@ -922,23 +967,19 @@ async fn listener_peer_limit_default_handshake_ok_then_drop() { Ok(fake_client) }); - let (config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(None, success_disconnect_inbound_handshaker).await; + let (config, mut peerset_rx) = spawn_inbound_listener_with_peer_limit( + None, + usize::MAX, + success_disconnect_inbound_handshaker, + ) + .await; let mut peer_count: usize = 0; loop { let peer_result = peerset_rx.try_next(); match peer_result { // A peer handshake succeeded. - Ok(Some(peer_result)) => { - assert!( - matches!(peer_result, Ok((_, _))), - "unexpected connection error: {peer_result:?}\n\ - {peer_count} previous peers succeeded", - ); - peer_count += 1; - } - + Ok(Some(_peer_change)) => peer_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. @@ -989,22 +1030,15 @@ async fn listener_peer_limit_default_handshake_ok_stay_open() { }); let (config, mut peerset_rx) = - spawn_inbound_listener_with_peer_limit(None, success_stay_open_inbound_handshaker).await; + spawn_inbound_listener_with_peer_limit(None, None, success_stay_open_inbound_handshaker) + .await; let mut peer_change_count: usize = 0; loop { let peer_change_result = peerset_rx.try_next(); match peer_change_result { // A peer handshake succeeded. - Ok(Some(peer_change_result)) => { - assert!( - matches!(peer_change_result, Ok((_, _))), - "unexpected connection error: {peer_change_result:?}\n\ - {peer_change_count} previous peers succeeded", - ); - peer_change_count += 1; - } - + Ok(Some(_peer_change)) => peer_change_count += 1, // The channel is closed and there are no messages left in the channel. Ok(None) => break, // The channel is still open, but there are no messages left in the channel. 
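Each listener test above stubs the handshaker with `tower::service_fn`. A minimal sketch of the always-erroring variant (the success variants instead return an `Ok(fake_client)` built from a `ClientTestHarness`, as shown in the hunks above):

    let error_inbound_handshaker = tower::service_fn(|_request| async {
        Err::<peer::Client, BoxError>(
            "test inbound handshaker always returns errors".into(),
        )
    });
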
@@ -1069,7 +1103,8 @@ async fn add_initial_peers_is_rate_limited() { let elapsed = Instant::now() - before; - assert_eq!(connections.len(), PEER_COUNT); + // Errors are ignored, so we don't expect any peers here + assert_eq!(connections.len(), 0); // Make sure the rate limiting worked by checking if it took long enough assert!( elapsed @@ -1087,7 +1122,7 @@ async fn add_initial_peers_is_rate_limited() { // Check for panics or errors in the address book updater task. let updater_result = address_book_updater_task_handle.now_or_never(); assert!( - matches!(updater_result, None) + updater_result.is_none() || matches!(updater_result, Some(Err(ref join_error)) if join_error.is_cancelled()) // The task method only returns one kind of error. // We can't check for error equality due to type erasure, @@ -1126,6 +1161,7 @@ async fn self_connections_should_fail() { initial_mainnet_peers: IndexSet::new(), initial_testnet_peers: IndexSet::new(), + cache_dir: CacheDir::disabled(), ..Config::default() }; @@ -1145,7 +1181,7 @@ async fn self_connections_should_fail() { .lock() .expect("unexpected panic in address book"); - let real_self_listener = unlocked_address_book.local_listener_meta_addr(); + let real_self_listener = unlocked_address_book.local_listener_meta_addr(Utc::now()); // Set a fake listener to get past the check for adding our own address unlocked_address_book.set_local_listener("192.168.0.0:1".parse().unwrap()); @@ -1371,6 +1407,7 @@ async fn local_listener_port_with(listen_addr: SocketAddr, network: Network) { // Stop Zebra making outbound connections initial_mainnet_peers: IndexSet::new(), initial_testnet_peers: IndexSet::new(), + cache_dir: CacheDir::disabled(), ..Config::default() }; @@ -1384,7 +1421,10 @@ async fn local_listener_port_with(listen_addr: SocketAddr, network: Network) { "Test user agent".to_string(), ) .await; - let local_listener = address_book.lock().unwrap().local_listener_meta_addr(); + let local_listener = address_book + .lock() + .unwrap() + .local_listener_meta_addr(Utc::now()); if listen_addr.port() == 0 { assert_ne!( @@ -1422,7 +1462,7 @@ async fn init_with_peer_limit( default_config: impl Into>, ) -> Arc> where - S: Service + Clone + Send + 'static, + S: Service + Clone + Send + Sync + 'static, S::Future: Send + 'static, { // This test might fail on machines with no configured IPv4 addresses @@ -1478,7 +1518,12 @@ where } // Manually initialize an address book without a timestamp tracker. - let mut address_book = AddressBook::new(config.listen_addr, config.network, Span::current()); + let mut address_book = AddressBook::new( + config.listen_addr, + config.network, + config.max_connections_per_ip, + Span::current(), + ); // Add enough fake peers to go over the limit, even if the limit is zero. let over_limit_peers = config.peerset_outbound_connection_limit() * 2 + 1; @@ -1549,8 +1594,7 @@ where // Check for panics or errors in the crawler. let crawl_result = crawl_task_handle.now_or_never(); assert!( - matches!(crawl_result, None) - || matches!(crawl_result, Some(Err(ref e)) if e.is_cancelled()), + crawl_result.is_none() || matches!(crawl_result, Some(Err(ref e)) if e.is_cancelled()), "unexpected error or panic in peer crawler task: {crawl_result:?}", ); @@ -1574,12 +1618,14 @@ where /// Returns the generated [`Config`], and the peer set receiver. 
 async fn spawn_inbound_listener_with_peer_limit<S>(
     peerset_initial_target_size: impl Into<Option<usize>>,
+    max_connections_per_ip: impl Into<Option<usize>>,
     listen_handshaker: S,
 ) -> (Config, mpsc::Receiver<DiscoveredPeer>)
 where
     S: Service<HandshakeRequest<TcpStream>, Response = peer::Client, Error = BoxError>
         + Clone
         + Send
+        + Sync
         + 'static,
     S::Future: Send + 'static,
 {
@@ -1587,6 +1633,9 @@ where
     let listen_addr = "127.0.0.1:0".parse().unwrap();
     let mut config = Config {
         listen_addr,
+        max_connections_per_ip: max_connections_per_ip
+            .into()
+            .unwrap_or(constants::DEFAULT_MAX_CONNS_PER_IP),
         ..Config::default()
     };

@@ -1655,8 +1704,7 @@ where
     // Check for panics or errors in the listener.
     let listen_result = listen_task_handle.now_or_never();
     assert!(
-        matches!(listen_result, None)
-            || matches!(listen_result, Some(Err(ref e)) if e.is_cancelled()),
+        listen_result.is_none() || matches!(listen_result, Some(Err(ref e)) if e.is_cancelled()),
         "unexpected error or panic in inbound peer listener task: {listen_result:?}",
     );

@@ -1703,6 +1751,8 @@ where
     let config = Config {
         initial_mainnet_peers: peers,

+        // We want exactly the above list of peers, without any cached peers.
+        cache_dir: CacheDir::disabled(),
         network: Network::Mainnet,
         listen_addr: unused_v4,

diff --git a/zebra-network/src/peer_set/limit.rs b/zebra-network/src/peer_set/limit.rs
index 6c1bfc76f2b..aef255dac1a 100644
--- a/zebra-network/src/peer_set/limit.rs
+++ b/zebra-network/src/peer_set/limit.rs
@@ -66,7 +66,7 @@ impl ActiveConnectionCounter {
         let label = label.to_string();

         #[cfg(feature = "progress-bar")]
-        let connection_bar = howudoin::new().label(label.clone());
+        let connection_bar = howudoin::new_root().label(label.clone());

         Self {
             count: 0,
@@ -115,8 +115,8 @@ impl ActiveConnectionCounter {
         #[cfg(feature = "progress-bar")]
         self.connection_bar
-            .set_pos(u64::try_from(self.count).expect("fits in u64"))
-            .set_len(u64::try_from(self.limit).expect("fits in u64"));
+            .set_pos(u64::try_from(self.count).expect("fits in u64"));
+        // .set_len(u64::try_from(self.limit).expect("fits in u64"));

         self.count
     }
@@ -178,8 +179,9 @@ impl Drop for ConnectionTracker {
         // We ignore disconnected errors, because the receiver can be dropped
         // before some connections are dropped.
+        // # Security
         //
-        // TODO: This channel will be bounded by the connection limit (#1850, #1851, #2902).
+        // This channel is actually bounded by the inbound and outbound connection limit.
         let _ = self.close_notification_tx.send(ConnectionClosed);
     }
 }
diff --git a/zebra-network/src/peer_set/set.rs b/zebra-network/src/peer_set/set.rs
index 88d53b5461a..2fa546c9883 100644
--- a/zebra-network/src/peer_set/set.rs
+++ b/zebra-network/src/peer_set/set.rs
@@ -2,8 +2,9 @@
 //!
 //! # Implementation
 //!
-//! The [`PeerSet`] implementation is adapted from the one in the [Tower Balance][tower-balance] crate.
-//! As described in that crate's documentation, it:
+//! The [`PeerSet`] implementation is adapted from the one in [tower::Balance][tower-balance].
+//!
+//! As described in Tower's documentation, it:
 //!
 //! > Distributes requests across inner services using the [Power of Two Choices][p2c].
 //! >
@@ -40,7 +41,7 @@
 //!
 //! [finagle]: https://twitter.github.io/finagle/guide/Clients.html#power-of-two-choices-p2c-least-loaded
 //! [p2c]: http://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf
-//! [tower-balance]: https://crates.io/crates/tower-balance
+//! [tower-balance]: https://github.com/tower-rs/tower/tree/master/tower/src/balance
 //!
 //! # Behavior During Network Upgrades
 //!
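The `PeerSet` docs above cite the Power of Two Choices strategy. A self-contained sketch of the core idea only, not Zebra's implementation (which preselects a ready peer via `preselected_p2c_peer`): sample two peers at random and keep the less loaded one.

    use rand::seq::SliceRandom;

    /// Return the less-loaded of two randomly sampled `(peer, load)` entries.
    fn p2c_choose<'a, T>(
        peers: &'a [(T, usize)],
        rng: &mut impl rand::Rng,
    ) -> Option<&'a (T, usize)> {
        match peers.len() {
            0 => None,
            1 => peers.first(),
            _ => {
                let mut sample = peers.choose_multiple(rng, 2);
                let (a, b) = (sample.next()?, sample.next()?);
                // Lower load wins; ties keep the first sample.
                Some(if a.1 <= b.1 { a } else { b })
            }
        }
    }
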
@@ -98,6 +99,7 @@ use std::{ fmt::Debug, future::Future, marker::PhantomData, + net::IpAddr, pin::Pin, task::{Context, Poll}, time::Instant, @@ -109,6 +111,8 @@ use futures::{ prelude::*, stream::FuturesUnordered, }; +use itertools::Itertools; +use num_integer::div_ceil; use tokio::{ sync::{broadcast, oneshot::error::TryRecvError, watch}, task::JoinHandle, @@ -123,6 +127,7 @@ use zebra_chain::chain_tip::ChainTip; use crate::{ address_book::AddressMetrics, + constants::MIN_PEER_SET_LOG_INTERVAL, peer::{LoadTrackedClient, MinimumPeerVersion}, peer_set::{ unready_service::{Error as UnreadyError, UnreadyService}, @@ -247,6 +252,10 @@ where /// The last time we logged a message about the peer set size last_peer_log: Option, + + /// The configured maximum number of peers that can be in the + /// peer set per IP, defaults to [`crate::constants::DEFAULT_MAX_CONNS_PER_IP`] + max_conns_per_ip: usize, } impl Drop for PeerSet @@ -266,6 +275,7 @@ where D::Error: Into, C: ChainTip, { + #[allow(clippy::too_many_arguments)] /// Construct a peerset which uses `discover` to manage peer connections. /// /// Arguments: @@ -278,6 +288,10 @@ where /// - `inv_stream`: receives inventory changes from peers, /// allowing the peer set to direct inventory requests; /// - `address_book`: when peer set is busy, it logs address book diagnostics. + /// - `minimum_peer_version`: endpoint to see the minimum peer protocol version in real time. + /// - `max_conns_per_ip`: configured maximum number of peers that can be in the + /// peer set per IP, defaults to the config value or to + /// [`crate::constants::DEFAULT_MAX_CONNS_PER_IP`]. pub fn new( config: &Config, discover: D, @@ -286,6 +300,7 @@ where inv_stream: broadcast::Receiver, address_metrics: watch::Receiver, minimum_peer_version: MinimumPeerVersion, + max_conns_per_ip: Option, ) -> Self { Self { // New peers @@ -313,6 +328,8 @@ where // Metrics last_peer_log: None, address_metrics, + + max_conns_per_ip: max_conns_per_ip.unwrap_or(config.max_connections_per_ip), } } @@ -418,8 +435,6 @@ where for guard in self.guards.iter() { guard.abort(); } - - // TODO: implement graceful shutdown for InventoryRegistry (#1678) } /// Check busy peer services for request completion or errors. @@ -474,6 +489,26 @@ where } } + /// Returns the number of peer connections Zebra already has with + /// the provided IP address + /// + /// # Performance + /// + /// This method is `O(connected peers)`, so it should not be called from a loop + /// that is already iterating through the peer set. + fn num_peers_with_ip(&self, ip: IpAddr) -> usize { + self.ready_services + .keys() + .chain(self.cancel_handles.keys()) + .filter(|addr| addr.ip() == ip) + .count() + } + + /// Returns `true` if Zebra is already connected to the IP and port in `addr`. + fn has_peer_with_addr(&self, addr: PeerSocketAddr) -> bool { + self.ready_services.contains_key(&addr) || self.cancel_handles.contains_key(&addr) + } + /// Checks for newly inserted or removed services. /// /// Puts inserted services in the unready list. @@ -494,7 +529,25 @@ where // - always do the same checks on every ready peer, and // - check for any errors that happened right after the handshake trace!(?key, "got Change::Insert from Discover"); - self.remove(&key); + + // # Security + // + // Drop the new peer if we are already connected to it. + // Preferring old connections avoids connection thrashing. 
+ if self.has_peer_with_addr(key) { + std::mem::drop(svc); + continue; + } + + // # Security + // + // drop the new peer if there are already `max_conns_per_ip` peers with + // the same IP address in the peer set. + if self.num_peers_with_ip(key.ip()) >= self.max_conns_per_ip { + std::mem::drop(svc); + continue; + } + self.push_unready(key, svc); } } @@ -504,15 +557,12 @@ where /// Checks if the minimum peer version has changed, and disconnects from outdated peers. fn disconnect_from_outdated_peers(&mut self) { if let Some(minimum_version) = self.minimum_peer_version.changed() { - // TODO: Remove when the code base migrates to Rust 2021 edition (#2709). - let preselected_p2c_peer = &mut self.preselected_p2c_peer; - self.ready_services.retain(|address, peer| { if peer.remote_version() >= minimum_version { true } else { - if *preselected_p2c_peer == Some(*address) { - *preselected_p2c_peer = None; + if self.preselected_p2c_peer == Some(*address) { + self.preselected_p2c_peer = None; } false @@ -733,11 +783,11 @@ where return fut.map_err(Into::into).boxed(); } - // TODO: reduce this log level after testing #2156 and #2726 - tracing::info!( + tracing::debug!( ?hash, "all ready peers are missing inventory, failing request" ); + async move { // Let other tasks run, so a retry request might get different ready peers. tokio::task::yield_now().await; @@ -810,38 +860,91 @@ where /// Given a number of ready peers calculate to how many of them Zebra will /// actually send the request to. Return this number. pub(crate) fn number_of_peers_to_broadcast(&self) -> usize { - // We are currently sending broadcast messages to half of the total peers. + // We are currently sending broadcast messages to a third of the total peers. + const PEER_FRACTION_TO_BROADCAST: usize = 3; + // Round up, so that if we have one ready peer, it gets the request. - (self.ready_services.len() + 1) / 2 + div_ceil(self.ready_services.len(), PEER_FRACTION_TO_BROADCAST) + } + + /// Returns the list of addresses in the peer set. + fn peer_set_addresses(&self) -> Vec { + self.ready_services + .keys() + .chain(self.cancel_handles.keys()) + .cloned() + .collect() } - /// Logs the peer set size. + /// Logs the peer set size, and any potential connectivity issues. fn log_peer_set_size(&mut self) { let ready_services_len = self.ready_services.len(); let unready_services_len = self.unready_services.len(); trace!(ready_peers = ?ready_services_len, unready_peers = ?unready_services_len); - if ready_services_len > 0 { - return; - } + let now = Instant::now(); // These logs are designed to be human-readable in a terminal, at the // default Zebra log level. If you need to know the peer set size for // every request, use the trace-level logs, or the metrics exporter. if let Some(last_peer_log) = self.last_peer_log { // Avoid duplicate peer set logs - if Instant::now().duration_since(last_peer_log).as_secs() < 60 { + if now.duration_since(last_peer_log) < MIN_PEER_SET_LOG_INTERVAL { return; } } else { // Suppress initial logs until the peer set has started up. // There can be multiple initial requests before the first peer is // ready. - self.last_peer_log = Some(Instant::now()); + self.last_peer_log = Some(now); return; } - self.last_peer_log = Some(Instant::now()); + self.last_peer_log = Some(now); + + // Log potential duplicate connections. + let peers = self.peer_set_addresses(); + + // Check for duplicates by address and port: these are unexpected and represent a bug. 
+        let duplicates: Vec<PeerSocketAddr> = peers.iter().duplicates().cloned().collect();
+
+        let mut peer_counts = peers.iter().counts();
+        peer_counts.retain(|peer, _count| duplicates.contains(peer));
+
+        if !peer_counts.is_empty() {
+            let duplicate_connections: usize = peer_counts.values().sum();
+
+            warn!(
+                ?duplicate_connections,
+                duplicated_peers = ?peer_counts.len(),
+                peers = ?peers.len(),
+                "duplicate peer connections in peer set"
+            );
+        }
+
+        // Check for duplicates by address: these can happen if there are multiple nodes
+        // behind a NAT or on a single server.
+        let peers: Vec<IpAddr> = peers.iter().map(|addr| addr.ip()).collect();
+        let duplicates: Vec<IpAddr> = peers.iter().duplicates().cloned().collect();
+
+        let mut peer_counts = peers.iter().counts();
+        peer_counts.retain(|peer, _count| duplicates.contains(peer));
+
+        if !peer_counts.is_empty() {
+            let duplicate_connections: usize = peer_counts.values().sum();
+
+            info!(
+                ?duplicate_connections,
+                duplicated_peers = ?peer_counts.len(),
+                peers = ?peers.len(),
+                "duplicate IP addresses in peer set"
+            );
+        }
+
+        // Only log connectivity warnings if all our peers are busy (or there are no peers).
+        if ready_services_len > 0 {
+            return;
+        }

         let address_metrics = *self.address_metrics.borrow();
         if unready_services_len == 0 {
diff --git a/zebra-network/src/peer_set/set/tests.rs b/zebra-network/src/peer_set/set/tests.rs
index a24330f9833..e044447e389 100644
--- a/zebra-network/src/peer_set/set/tests.rs
+++ b/zebra-network/src/peer_set/set/tests.rs
@@ -23,6 +23,7 @@ use zebra_chain::{

 use crate::{
     address_book::AddressMetrics,
+    constants::DEFAULT_MAX_CONNS_PER_IP,
     peer::{ClientTestHarness, LoadTrackedClient, MinimumPeerVersion},
     peer_set::{set::MorePeers, InventoryChange, PeerSet},
     protocol::external::types::Version,
@@ -117,6 +118,7 @@ struct PeerSetBuilder<D, C> {
     inv_stream: Option<broadcast::Receiver<InventoryChange>>,
     address_book: Option<Arc<std::sync::Mutex<AddressBook>>>,
     minimum_peer_version: Option<MinimumPeerVersion<C>>,
+    max_conns_per_ip: Option<usize>,
 }

 impl PeerSetBuilder<(), ()> {
@@ -137,6 +139,7 @@ impl<D, C> PeerSetBuilder<D, C> {
             inv_stream: self.inv_stream,
             address_book: self.address_book,
             minimum_peer_version: self.minimum_peer_version,
+            max_conns_per_ip: self.max_conns_per_ip,
         }
     }

@@ -146,13 +149,33 @@ impl<D, C> PeerSetBuilder<D, C> {
         minimum_peer_version: MinimumPeerVersion<NewC>,
     ) -> PeerSetBuilder<D, NewC> {
         PeerSetBuilder {
+            config: self.config,
+            discover: self.discover,
+            demand_signal: self.demand_signal,
+            handle_rx: self.handle_rx,
+            inv_stream: self.inv_stream,
+            address_book: self.address_book,
             minimum_peer_version: Some(minimum_peer_version),
+            max_conns_per_ip: self.max_conns_per_ip,
+        }
+    }
+
+    /// Use the provided maximum number of connections per IP when constructing the [`PeerSet`] instance.
+ pub fn max_conns_per_ip(self, max_conns_per_ip: usize) -> PeerSetBuilder { + assert!( + max_conns_per_ip > 0, + "max_conns_per_ip must be greater than zero" + ); + + PeerSetBuilder { config: self.config, discover: self.discover, demand_signal: self.demand_signal, handle_rx: self.handle_rx, inv_stream: self.inv_stream, address_book: self.address_book, + minimum_peer_version: self.minimum_peer_version, + max_conns_per_ip: Some(max_conns_per_ip), } } } @@ -175,6 +198,7 @@ where let minimum_peer_version = self .minimum_peer_version .expect("`minimum_peer_version` must be set"); + let max_conns_per_ip = self.max_conns_per_ip; let demand_signal = self .demand_signal @@ -196,6 +220,7 @@ where inv_stream, address_metrics, minimum_peer_version, + max_conns_per_ip, ); (peer_set, guard) @@ -309,7 +334,12 @@ impl PeerSetGuard { let local_listener = "127.0.0.1:1000" .parse() .expect("Invalid local listener address"); - let address_book = AddressBook::new(local_listener, Network::Mainnet, Span::none()); + let address_book = AddressBook::new( + local_listener, + Network::Mainnet, + DEFAULT_MAX_CONNS_PER_IP, + Span::none(), + ); Arc::new(std::sync::Mutex::new(address_book)) } diff --git a/zebra-network/src/peer_set/set/tests/prop.rs b/zebra-network/src/peer_set/set/tests/prop.rs index 1a95f31e642..b7301fea214 100644 --- a/zebra-network/src/peer_set/set/tests/prop.rs +++ b/zebra-network/src/peer_set/set/tests/prop.rs @@ -42,6 +42,7 @@ proptest! { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version) + .max_conns_per_ip(usize::MAX) .build(); check_if_only_up_to_date_peers_are_live( @@ -72,6 +73,7 @@ proptest! { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(usize::MAX) .build(); check_if_only_up_to_date_peers_are_live( @@ -122,6 +124,7 @@ proptest! { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(usize::MAX) .build(); // Get the total number of active peers @@ -197,6 +200,7 @@ proptest! { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(usize::MAX) .build(); // Remove peers, test broadcast until there is only 1 peer left in the peerset @@ -267,6 +271,7 @@ proptest! { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(usize::MAX) .build(); // Remove peers @@ -306,7 +311,7 @@ where .all(|harness| harness.remote_version() < minimum_version); if all_peers_are_outdated { - prop_assert!(matches!(poll_result, None)); + prop_assert!(poll_result.is_none()); } else { prop_assert!(matches!(poll_result, Some(Ok(_)))); } diff --git a/zebra-network/src/peer_set/set/tests/vectors.rs b/zebra-network/src/peer_set/set/tests/vectors.rs index 1f58f0f0b0f..8290469997c 100644 --- a/zebra-network/src/peer_set/set/tests/vectors.rs +++ b/zebra-network/src/peer_set/set/tests/vectors.rs @@ -1,6 +1,6 @@ //! Fixed test vectors for the peer set. 
-use std::{iter, time::Duration}; +use std::{cmp::max, iter, time::Duration}; use tokio::time::timeout; use tower::{Service, ServiceExt}; @@ -12,6 +12,7 @@ use zebra_chain::{ use super::{PeerSetBuilder, PeerVersions}; use crate::{ + constants::DEFAULT_MAX_CONNS_PER_IP, peer::{ClientRequest, MinimumPeerVersion}, peer_set::inventory_registry::InventoryStatus, protocol::external::{types::Version, InventoryHash}, @@ -144,6 +145,7 @@ fn peer_set_ready_multiple_connections() { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(max(3, DEFAULT_MAX_CONNS_PER_IP)) .build(); // Get peerset ready @@ -174,6 +176,55 @@ fn peer_set_ready_multiple_connections() { }); } +#[test] +fn peer_set_rejects_connections_past_per_ip_limit() { + const NUM_PEER_VERSIONS: usize = crate::constants::DEFAULT_MAX_CONNS_PER_IP + 1; + + // Use three peers with the same version + let peer_version = Version::min_specified_for_upgrade(Network::Mainnet, NetworkUpgrade::Nu5); + let peer_versions = PeerVersions { + peer_versions: [peer_version; NUM_PEER_VERSIONS].into_iter().collect(), + }; + + // Start the runtime + let (runtime, _init_guard) = zebra_test::init_async(); + let _guard = runtime.enter(); + + // Pause the runtime's timer so that it advances automatically. + // + // CORRECTNESS: This test does not depend on external resources that could really timeout, like + // real network connections. + tokio::time::pause(); + + // Get peers and client handles of them + let (discovered_peers, handles) = peer_versions.mock_peer_discovery(); + let (minimum_peer_version, _best_tip_height) = + MinimumPeerVersion::with_mock_chain_tip(Network::Mainnet); + + // Make sure we have the right number of peers + assert_eq!(handles.len(), NUM_PEER_VERSIONS); + + runtime.block_on(async move { + // Build a peerset + let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() + .with_discover(discovered_peers) + .with_minimum_peer_version(minimum_peer_version.clone()) + .build(); + + // Get peerset ready + let peer_ready = peer_set + .ready() + .await + .expect("peer set service is always ready"); + + // Check we have the right amount of ready services + assert_eq!( + peer_ready.ready_services.len(), + crate::constants::DEFAULT_MAX_CONNS_PER_IP + ); + }); +} + /// Check that a peer set with an empty inventory registry routes requests to a random ready peer. 
#[test] fn peer_set_route_inv_empty_registry() { @@ -208,6 +259,7 @@ fn peer_set_route_inv_empty_registry() { let (mut peer_set, _peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(max(2, DEFAULT_MAX_CONNS_PER_IP)) .build(); // Get peerset ready @@ -290,6 +342,7 @@ fn peer_set_route_inv_advertised_registry_order(advertised_first: bool) { let (mut peer_set, mut peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(max(2, DEFAULT_MAX_CONNS_PER_IP)) .build(); // Advertise some inventory @@ -336,12 +389,10 @@ fn peer_set_route_inv_advertised_registry_order(advertised_first: bool) { }; assert!( - matches!( - other_handle - .try_to_receive_outbound_client_request() - .request(), - None - ), + other_handle + .try_to_receive_outbound_client_request() + .request() + .is_none(), "request routed to non-advertised peer", ); }); @@ -399,6 +450,7 @@ fn peer_set_route_inv_missing_registry_order(missing_first: bool) { let (mut peer_set, mut peer_set_guard) = PeerSetBuilder::new() .with_discover(discovered_peers) .with_minimum_peer_version(minimum_peer_version.clone()) + .max_conns_per_ip(max(2, DEFAULT_MAX_CONNS_PER_IP)) .build(); // Mark some inventory as missing @@ -430,12 +482,10 @@ fn peer_set_route_inv_missing_registry_order(missing_first: bool) { }; assert!( - matches!( - missing_handle - .try_to_receive_outbound_client_request() - .request(), - None - ), + missing_handle + .try_to_receive_outbound_client_request() + .request() + .is_none(), "request routed to missing peer", ); @@ -529,12 +579,9 @@ fn peer_set_route_inv_all_missing_fail() { let missing_handle = &mut handles[0]; assert!( - matches!( - missing_handle + missing_handle .try_to_receive_outbound_client_request() - .request(), - None - ), + .request().is_none(), "request routed to missing peer", ); diff --git a/zebra-network/src/peer_set/unready_service.rs b/zebra-network/src/peer_set/unready_service.rs index 108a9e8307f..d49587cde1d 100644 --- a/zebra-network/src/peer_set/unready_service.rs +++ b/zebra-network/src/peer_set/unready_service.rs @@ -1,6 +1,9 @@ -/// Services that are busy or newly created. -/// -/// Adapted from tower-balance. +//! Services that are busy or newly created. +//! +//! The [`UnreadyService`] implementation is adapted from the one in [tower::Balance][tower-balance]. +//! +//! [tower-balance]: https://github.com/tower-rs/tower/tree/master/tower/src/balance + use std::{ future::Future, marker::PhantomData, diff --git a/zebra-network/src/peer_set/unready_service/tests/vectors.rs b/zebra-network/src/peer_set/unready_service/tests/vectors.rs index 6869900f93d..4f78980ea6a 100644 --- a/zebra-network/src/peer_set/unready_service/tests/vectors.rs +++ b/zebra-network/src/peer_set/unready_service/tests/vectors.rs @@ -1,6 +1,4 @@ //! Fixed test vectors for unready services. -//! -//! TODO: test that inner service errors are handled correctly (#3204) use std::marker::PhantomData; diff --git a/zebra-network/src/policies.rs b/zebra-network/src/policies.rs index 58ed093c64a..5e00207ba75 100644 --- a/zebra-network/src/policies.rs +++ b/zebra-network/src/policies.rs @@ -35,8 +35,11 @@ impl Policy // Let other tasks run, so we're more likely to choose a different peer, // and so that any notfound inv entries win the race to the PeerSet. 
// - // TODO: move syncer retries into the PeerSet, - // so we always choose different peers (#3235) + // # Security + // + // We want to choose different peers for retries, so we have a better chance of getting each block. + // This is implemented by the connection state machine sending synthetic `notfound`s to the + // `InventoryRegistry`, as well as forwarding actual `notfound`s from peers. Box::pin(tokio::task::yield_now().map(move |()| retry_outcome)), ) } else { diff --git a/zebra-network/src/protocol/external/codec.rs b/zebra-network/src/protocol/external/codec.rs index aec54772c85..6a4ae0585eb 100644 --- a/zebra-network/src/protocol/external/codec.rs +++ b/zebra-network/src/protocol/external/codec.rs @@ -184,10 +184,10 @@ impl Codec { /// Obtain the size of the body of a given message. This will match the /// number of bytes written to the writer provided to `write_body` for the /// same message. - /// - /// TODO: Replace with a size estimate, to avoid multiple serializations - /// for large data structures like lists, blocks, and transactions. - /// See #1774. + // # Performance TODO + // + // If this code shows up in profiles, replace with a size estimate or cached size, + // to avoid multiple serializations for large data structures like lists, blocks, and transactions. fn body_length(&self, msg: &Message) -> usize { let mut writer = FakeWriter(0); @@ -500,8 +500,6 @@ impl Codec { /// Note: zcashd only requires fields up to `address_recv`, but everything up to `relay` is required in Zebra. /// see fn read_version(&self, mut reader: R) -> Result { - // Clippy 1.64 is wrong here, this lazy evaluation is necessary, constructors are functions. This is fixed in 1.66. - #[allow(clippy::unnecessary_lazy_evaluations)] Ok(VersionMessage { version: Version(reader.read_u32::()?), // Use from_bits_truncate to discard unknown service bits. diff --git a/zebra-network/src/protocol/external/codec/tests/vectors.rs b/zebra-network/src/protocol/external/codec/tests/vectors.rs index 89c6b08f2a0..74f46ec7f52 100644 --- a/zebra-network/src/protocol/external/codec/tests/vectors.rs +++ b/zebra-network/src/protocol/external/codec/tests/vectors.rs @@ -467,12 +467,13 @@ fn version_user_agent_size_limits() { // Encode the rest of the message onto `bytes` (relay should be optional) { let Message::Version(VersionMessage { - user_agent, - start_height, - .. - }) = invalid_version_message else { - unreachable!("version_message is a version"); - }; + user_agent, + start_height, + .. + }) = invalid_version_message + else { + unreachable!("version_message is a version"); + }; user_agent .zcash_serialize(&mut writer) @@ -553,7 +554,8 @@ fn reject_command_and_reason_size_limits() { ccode, reason, data, - } = invalid_reject_message else { + } = invalid_reject_message + else { unreachable!("invalid_reject_message is a reject"); }; diff --git a/zebra-network/src/protocol/external/message.rs b/zebra-network/src/protocol/external/message.rs index 009566dc24d..f8ee8cbc9b8 100644 --- a/zebra-network/src/protocol/external/message.rs +++ b/zebra-network/src/protocol/external/message.rs @@ -401,7 +401,7 @@ impl TryFrom for VersionMessage { } } -// TODO: add tests for Error conversion and Reject message serialization (#4633) +// TODO: add tests for Error conversion and Reject message serialization // (Zebra does not currently send reject messages, and it ignores received reject messages.) 
-// TODO: add tests for Error conversion and Reject message serialization (#4633) +// TODO: add tests for Error conversion and Reject message serialization // (Zebra does not currently send reject messages, and it ignores received reject messages.) impl<E> From<E> for Message where
diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 2ac0df46cb1..1b8b9c824ce 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,10 +1,18 @@ [package] name = "zebra-node-services" +version = "1.0.0-beta.28" authors = ["Zcash Foundation <zebra@zfnd.org>"] +description = "The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" -version = "1.0.0-beta.24" -edition = "2021" repository = "https://github.com/ZcashFoundation/zebra" +edition = "2021" + +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of <https://crates.io/category_slugs> +categories = ["asynchronous", "cryptography::cryptocurrencies", "network-programming"] [features] default = [] @@ -27,13 +35,22 @@ rpc-client = [ ] [dependencies] -zebra-chain = { path = "../zebra-chain" } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.28" } # Optional dependencies # Tool and test feature rpc-client color-eyre = { version = "0.6.2", optional = true } jsonrpc-core = { version = "18.0.0", optional = true } -reqwest = { version = "0.11.18", optional = true } -serde = { version = "1.0.163", optional = true } -serde_json = { version = "1.0.95", optional = true } +# Security: avoid default dependency on openssl +reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"], optional = true } +serde = { version = "1.0.179", optional = true } +serde_json = { version = "1.0.104", optional = true } + +[dev-dependencies] + +color-eyre = "0.6.2" +jsonrpc-core = "18.0.0" +reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"] } +serde = "1.0.179" +serde_json = "1.0.104"
diff --git a/zebra-node-services/src/rpc_client.rs b/zebra-node-services/src/rpc_client.rs index e214af7350e..350b373aa72 100644 --- a/zebra-node-services/src/rpc_client.rs +++ b/zebra-node-services/src/rpc_client.rs @@ -43,6 +43,44 @@ impl RpcRequestClient { .await } + /// Builds an RPC request with a variable `content-type` header. + pub async fn call_with_content_type( + &self, + method: impl AsRef<str>, + params: impl AsRef<str>, + content_type: String, + ) -> reqwest::Result<Response> { + let method = method.as_ref(); + let params = params.as_ref(); + + self.client + .post(format!("http://{}", &self.rpc_address)) + .body(format!( + r#"{{"jsonrpc": "2.0", "method": "{method}", "params": {params}, "id":123 }}"# + )) + .header("Content-Type", content_type) + .send() + .await + } +
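A hypothetical usage sketch for the new method above, assuming the crate's existing `RpcRequestClient::new(SocketAddr)` constructor and the `rpc-client` feature; the address and RPC method name are illustrative, and this needs a running node to succeed.

```rust
use zebra_node_services::rpc_client::RpcRequestClient;

async fn probe_content_type_handling() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_address: std::net::SocketAddr = "127.0.0.1:8232".parse()?;
    let client = RpcRequestClient::new(rpc_address);

    // `application/json` is the content type the endpoint expects.
    let response = client
        .call_with_content_type("getinfo", "[]", "application/json".to_string())
        .await?;
    println!("application/json: {}", response.status());

    // `text/plain` should also succeed, because the server middleware
    // (changed later in this diff) rewrites it to `application/json`.
    let response = client
        .call_with_content_type("getinfo", "[]", "text/plain".to_string())
        .await?;
    println!("text/plain: {}", response.status());

    Ok(())
}
```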
+ /// Builds an RPC request without a `content-type` header. + pub async fn call_with_no_content_type( + &self, + method: impl AsRef<str>, + params: impl AsRef<str>, + ) -> reqwest::Result<Response> { + let method = method.as_ref(); + let params = params.as_ref(); + + self.client + .post(format!("http://{}", &self.rpc_address)) + .body(format!( + r#"{{"jsonrpc": "2.0", "method": "{method}", "params": {params}, "id":123 }}"# + )) + .send() + .await + } + /// Builds rpc request and gets text from response pub async fn text_from_call( &self,
diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 67d17b91c06..d3b7bc8084e 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,11 +1,18 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" authors = ["Zcash Foundation <zebra@zfnd.org>"] +description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of <https://crates.io/category_slugs> +categories = ["asynchronous", "cryptography::cryptocurrencies", "encoding", "network-programming"] [features] default = [] @@ -32,51 +39,51 @@ proptest-impl = [ ] [dependencies] -chrono = { version = "0.4.24", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } futures = "0.3.28" # lightwalletd sends JSON-RPC requests over HTTP 1.1 -hyper = { version = "0.14.26", features = ["http1", "server"] } +hyper = { version = "0.14.27", features = ["http1", "server"] } jsonrpc-core = "18.0.0" jsonrpc-derive = "18.0.0" jsonrpc-http-server = "18.0.0" -num_cpus = "1.15.0" +num_cpus = "1.16.0" # zebra-rpc needs the preserve_order feature in serde_json, which is a dependency of jsonrpc-core -serde_json = { version = "1.0.96", features = ["preserve_order"] } -indexmap = { version = "1.9.3", features = ["serde"] } +serde_json = { version = "1.0.104", features = ["preserve_order"] } +indexmap = { version = "2.0.0", features = ["serde"] } -tokio = { version = "1.28.0", features = ["time", "rt-multi-thread", "macros", "tracing"] } +tokio = { version = "1.29.1", features = ["time", "rt-multi-thread", "macros", "tracing"] } tower = "0.4.13" tracing = "0.1.37" hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.163", features = ["serde_derive"] } +serde = { version = "1.0.179", features = ["serde_derive"] } # Experimental feature getblocktemplate-rpcs -rand = { version = "0.8.5", package = "rand", optional = true } +rand = { version = "0.8.5", optional = true } # ECC deps used by getblocktemplate-rpcs feature -zcash_address = { version = "0.2.1", optional = true } +zcash_address = { version = "0.3.0", optional = true } # Test-only feature proptest-impl -proptest = { version = "1.1.0", optional = true } +proptest = { version = "1.2.0", optional = true } -zebra-chain = { path = "../zebra-chain", features = ["json-conversion"] } -zebra-consensus = { path = "../zebra-consensus" } -zebra-network = { path = "../zebra-network" } -zebra-node-services = { path = "../zebra-node-services" } -zebra-script = { path = "../zebra-script" } -zebra-state = { path = "../zebra-state" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28", features = ["json-conversion"] } +zebra-consensus = { path = "../zebra-consensus", version = 
"1.0.0-beta.28" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.28" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.28" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.28" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.28" } [dev-dependencies] -insta = { version = "1.29.0", features = ["redactions", "json", "ron"] } +insta = { version = "1.31.0", features = ["redactions", "json", "ron"] } -proptest = "1.1.0" +proptest = "1.2.0" -thiserror = "1.0.40" -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +thiserror = "1.0.44" +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] } zebra-consensus = { path = "../zebra-consensus", features = ["proptest-impl"] } diff --git a/zebra-rpc/src/constants.rs b/zebra-rpc/src/constants.rs index 9d549767bd5..58b119ad264 100644 --- a/zebra-rpc/src/constants.rs +++ b/zebra-rpc/src/constants.rs @@ -16,3 +16,6 @@ pub const INVALID_PARAMETERS_ERROR_CODE: ErrorCode = ErrorCode::ServerError(-1); /// `lightwalletd` expects error code `-8` when a block is not found: /// pub const MISSING_BLOCK_ERROR_CODE: ErrorCode = ErrorCode::ServerError(-8); + +/// When logging parameter data, only log this much data. +pub const MAX_PARAMS_LOG_LENGTH: usize = 100; diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs index 4923d0513bd..0f9ccd9906f 100644 --- a/zebra-rpc/src/methods.rs +++ b/zebra-rpc/src/methods.rs @@ -251,8 +251,11 @@ where { // Configuration // - /// Zebra's application version. - app_version: String, + /// Zebra's application version, with build metadata. + build_version: String, + + /// Zebra's RPC user agent. + user_agent: String, /// The configured network for this RPC service. network: Network, @@ -300,8 +303,13 @@ where Tip: ChainTip + Clone + Send + Sync + 'static, { /// Create a new instance of the RPC handler. - pub fn new( - app_version: Version, + // + // TODO: + // - put some of the configs or services in their own struct? + #[allow(clippy::too_many_arguments)] + pub fn new( + build_version: VersionString, + user_agent: UserAgentString, network: Network, debug_force_finished_sync: bool, debug_like_zcashd: bool, @@ -310,21 +318,24 @@ where latest_chain_tip: Tip, ) -> (Self, JoinHandle<()>) where - Version: ToString, + VersionString: ToString + Clone + Send + 'static, + UserAgentString: ToString + Clone + Send + 'static, >::Future: Send, >::Future: Send, { let (runner, queue_sender) = Queue::start(); - let mut app_version = app_version.to_string(); + let mut build_version = build_version.to_string(); + let user_agent = user_agent.to_string(); // Match zcashd's version format, if the version string has anything in it - if !app_version.is_empty() && !app_version.starts_with('v') { - app_version.insert(0, 'v'); + if !build_version.is_empty() && !build_version.starts_with('v') { + build_version.insert(0, 'v'); } let rpc_impl = RpcImpl { - app_version, + build_version, + user_agent, network, debug_force_finished_sync, debug_like_zcashd, @@ -364,25 +375,10 @@ where State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, { - #[allow(clippy::unwrap_in_result)] fn get_info(&self) -> Result { - // Build a [BIP 14] valid user agent with release info. 
- // - // [BIP 14]: https://github.com/bitcoin/bips/blob/master/bip-0014.mediawiki - let release_version = self - .app_version - // remove everything after the `+` character if any - .split('+') - .next() - .expect("always at least 1 slice") - // remove the previously added `v` character at the start since it's not a part of the user agent. - .strip_prefix('v') - .expect("we are always expecting the `v` prefix"); - let user_agent = format!("/Zebra:{release_version}/"); - let response = GetInfo { - build: self.app_version.clone(), - subversion: user_agent, + build: self.build_version.clone(), + subversion: self.user_agent.clone(), }; Ok(response) @@ -748,11 +744,68 @@ where // this needs a new state request for the height -> hash index let height = hash_or_height.height(); + // Sapling trees + // + // # Concurrency + // + // We look up by block hash so the hash, transaction IDs, and confirmations + // are consistent. + let request = zebra_state::ReadRequest::SaplingTree(hash.into()); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_err(|error| Error { + code: ErrorCode::ServerError(0), + message: error.to_string(), + data: None, + })?; + + let sapling_note_commitment_tree_count = match response { + zebra_state::ReadResponse::SaplingTree(Some(nct)) => nct.count(), + zebra_state::ReadResponse::SaplingTree(None) => 0, + _ => unreachable!("unmatched response to a SaplingTree request"), + }; + + // Orchard trees + // + // # Concurrency + // + // We look up by block hash so the hash, transaction IDs, and confirmations + // are consistent. + let request = zebra_state::ReadRequest::OrchardTree(hash.into()); + let response = state + .ready() + .and_then(|service| service.call(request)) + .await + .map_err(|error| Error { + code: ErrorCode::ServerError(0), + message: error.to_string(), + data: None, + })?; + + let orchard_note_commitment_tree_count = match response { + zebra_state::ReadResponse::OrchardTree(Some(nct)) => nct.count(), + zebra_state::ReadResponse::OrchardTree(None) => 0, + _ => unreachable!("unmatched response to a OrchardTree request"), + }; + + let sapling = SaplingTrees { + size: sapling_note_commitment_tree_count, + }; + + let orchard = OrchardTrees { + size: orchard_note_commitment_tree_count, + }; + + let trees = GetBlockTrees { sapling, orchard }; + Ok(GetBlock::Object { hash: GetBlockHash(hash), confirmations, height, tx, + trees, }) } else { Err(Error { @@ -1366,6 +1419,9 @@ pub enum GetBlock { // // TODO: use a typed Vec here tx: Vec, + + /// Information about the note commitment trees. + trees: GetBlockTrees, }, } @@ -1528,6 +1584,39 @@ impl GetRawTransaction { } } +/// Information about the sapling and orchard note commitment trees if any. +#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct GetBlockTrees { + #[serde(skip_serializing_if = "SaplingTrees::is_empty")] + sapling: SaplingTrees, + #[serde(skip_serializing_if = "OrchardTrees::is_empty")] + orchard: OrchardTrees, +} + +/// Sapling note commitment tree information. +#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct SaplingTrees { + size: u64, +} + +impl SaplingTrees { + fn is_empty(&self) -> bool { + self.size == 0 + } +} + +/// Orchard note commitment tree information. 
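The `skip_serializing_if` attributes on `GetBlockTrees` above are why the updated snapshots later in this diff show `"trees": {}` for blocks with no shielded outputs: when both tree sizes are zero, both fields are skipped and only the empty object remains. A self-contained demonstration with copies of these types, assuming `serde` (with the `derive` feature) and `serde_json` as dependencies:

```rust
use serde::Serialize;

#[derive(Copy, Clone, Serialize)]
struct SaplingTrees {
    size: u64,
}

impl SaplingTrees {
    fn is_empty(&self) -> bool {
        self.size == 0
    }
}

#[derive(Copy, Clone, Serialize)]
struct OrchardTrees {
    size: u64,
}

impl OrchardTrees {
    fn is_empty(&self) -> bool {
        self.size == 0
    }
}

#[derive(Copy, Clone, Serialize)]
struct GetBlockTrees {
    #[serde(skip_serializing_if = "SaplingTrees::is_empty")]
    sapling: SaplingTrees,
    #[serde(skip_serializing_if = "OrchardTrees::is_empty")]
    orchard: OrchardTrees,
}

fn main() {
    let empty = GetBlockTrees {
        sapling: SaplingTrees { size: 0 },
        orchard: OrchardTrees { size: 0 },
    };
    // Both fields are skipped, so the whole object serializes as `{}`.
    assert_eq!(serde_json::to_string(&empty).unwrap(), "{}");

    let nonempty = GetBlockTrees {
        sapling: SaplingTrees { size: 2 },
        orchard: OrchardTrees { size: 0 },
    };
    // Only the non-empty tree is serialized.
    assert_eq!(
        serde_json::to_string(&nonempty).unwrap(),
        r#"{"sapling":{"size":2}}"#
    );
}
```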
+#[derive(Copy, Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)] +pub struct OrchardTrees { + size: u64, +} + +impl OrchardTrees { + fn is_empty(&self) -> bool { + self.size == 0 + } +} + /// Check if provided height range is valid for address indexes. fn check_height_range(start: Height, end: Height, chain_height: Height) -> Result<()> { if start == Height(0) || end == Height(0) { diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs index 5a94be6ceb0..ca861e5440b 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs @@ -24,7 +24,7 @@ use zebra_chain::{ }; use zebra_consensus::{ funding_stream_address, funding_stream_values, height_for_first_halving, miner_subsidy, - VerifyChainError, + RouterError, }; use zebra_network::AddressBookPeers; use zebra_node_services::mempool; @@ -110,8 +110,7 @@ pub trait GetBlockTemplateRpc { /// - the parent block is a valid block that Zebra already has, or will receive soon. /// /// Zebra verifies blocks in parallel, and keeps recent chains in parallel, - /// so moving between chains is very cheap. (But forking a new chain may take some time, - /// until bug #4794 is fixed.) + /// so moving between chains and forking chains is very cheap. /// /// This rpc method is available only if zebra is built with `--features getblocktemplate-rpcs`. #[rpc(name = "getblocktemplate")] @@ -218,8 +217,14 @@ pub trait GetBlockTemplateRpc { } /// RPC method implementations. -pub struct GetBlockTemplateRpcImpl -where +pub struct GetBlockTemplateRpcImpl< + Mempool, + State, + Tip, + BlockVerifierRouter, + SyncStatus, + AddressBook, +> where Mempool: Service< mempool::Request, Response = mempool::Response, @@ -230,7 +235,7 @@ where Response = zebra_state::ReadResponse, Error = zebra_state::BoxError, >, - ChainVerifier: Service + BlockVerifierRouter: Service + Clone + Send + Sync @@ -268,7 +273,7 @@ where latest_chain_tip: Tip, /// The chain verifier, used for submitting blocks. - chain_verifier: ChainVerifier, + block_verifier_router: BlockVerifierRouter, /// The chain sync status, used for checking if Zebra is likely close to the network chain tip. 
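The `ChainVerifier` to `BlockVerifierRouter` rename in the hunk above is purely at the type-parameter level: the struct stays generic over any `tower::Service` with the verifier's request and response types, which is what lets the tests later in this diff swap in `MockService`. A reduced sketch of that shape, with stand-in request and response types:

```rust
use tower::Service;

type BoxError = Box<dyn std::error::Error + Send + Sync>;

struct VerifyBlock; // stand-in for zebra_consensus::Request
struct BlockHash([u8; 32]); // stand-in for block::Hash

// Generic over any verifier service: production code plugs in the real
// block verifier router, unit tests plug in a mock.
#[allow(dead_code)]
struct TemplateRpc<BlockVerifierRouter>
where
    BlockVerifierRouter: Service<VerifyBlock, Response = BlockHash, Error = BoxError>
        + Clone
        + Send
        + 'static,
{
    block_verifier_router: BlockVerifierRouter,
}
```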
sync_status: SyncStatus, @@ -277,8 +282,8 @@ where address_book: AddressBook, } -impl - GetBlockTemplateRpcImpl +impl + GetBlockTemplateRpcImpl where Mempool: Service< mempool::Request, @@ -294,7 +299,7 @@ where + Sync + 'static, Tip: ChainTip + Clone + Send + Sync + 'static, - ChainVerifier: Service + BlockVerifierRouter: Service + Clone + Send + Sync @@ -314,7 +319,7 @@ where mempool: Buffer, state: State, latest_chain_tip: Tip, - chain_verifier: ChainVerifier, + block_verifier_router: BlockVerifierRouter, sync_status: SyncStatus, address_book: AddressBook, ) -> Self { @@ -353,15 +358,15 @@ where mempool, state, latest_chain_tip, - chain_verifier, + block_verifier_router, sync_status, address_book, } } } -impl GetBlockTemplateRpc - for GetBlockTemplateRpcImpl +impl GetBlockTemplateRpc + for GetBlockTemplateRpcImpl where Mempool: Service< mempool::Request, @@ -379,12 +384,12 @@ where + 'static, >::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, - ChainVerifier: Service + BlockVerifierRouter: Service + Clone + Send + Sync + 'static, - >::Future: Send, + >::Future: Send, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { @@ -449,7 +454,7 @@ where .and_then(get_block_template::JsonParameters::block_proposal_data) { return validate_block_proposal( - self.chain_verifier.clone(), + self.block_verifier_router.clone(), block_proposal_bytes, network, latest_chain_tip, @@ -516,15 +521,15 @@ where // // Optional TODO: // - add a `MempoolChange` type with an `async changed()` method (like `ChainTip`) - let Some(mempool_txs) = - fetch_mempool_transactions(mempool.clone(), tip_hash) - .await? - // If the mempool and state responses are out of sync: - // - if we are not long polling, omit mempool transactions from the template, - // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. - .or_else(|| client_long_poll_id.is_none().then(Vec::new)) else { - continue; - }; + let Some(mempool_txs) = fetch_mempool_transactions(mempool.clone(), tip_hash) + .await? + // If the mempool and state responses are out of sync: + // - if we are not long polling, omit mempool transactions from the template, + // - if we are long polling, continue to the next iteration of the loop to make fresh state and mempool requests. 
+ .or_else(|| client_long_poll_id.is_none().then(Vec::new)) + else { + continue; + }; // - Long poll ID calculation let server_long_poll_id = LongPollInput::new( @@ -732,13 +737,13 @@ where HexData(block_bytes): HexData, _parameters: Option, ) -> BoxFuture> { - let mut chain_verifier = self.chain_verifier.clone(); + let mut block_verifier_router = self.block_verifier_router.clone(); async move { let block: Block = match block_bytes.zcash_deserialize_into() { Ok(block_bytes) => block_bytes, Err(error) => { - tracing::info!(?error, "submit block failed"); + tracing::info!(?error, "submit block failed: block bytes could not be deserialized into a structurally valid block"); return Ok(submit_block::ErrorResponse::Rejected.into()); } @@ -748,8 +753,9 @@ where .coinbase_height() .map(|height| height.0.to_string()) .unwrap_or_else(|| "invalid coinbase height".to_string()); + let block_hash = block.hash(); - let chain_verifier_response = chain_verifier + let block_verifier_router_response = block_verifier_router .ready() .await .map_err(|error| Error { @@ -760,7 +766,7 @@ where .call(zebra_consensus::Request::Commit(Arc::new(block))) .await; - let chain_error = match chain_verifier_response { + let chain_error = match block_verifier_router_response { // Currently, this match arm returns `null` (Accepted) for blocks committed // to any chain, but Accepted is only for blocks in the best chain. // @@ -776,11 +782,10 @@ where // by downcasting from Any to VerifyChainError. Err(box_error) => { let error = box_error - .downcast::() + .downcast::() .map(|boxed_chain_error| *boxed_chain_error); - // TODO: add block hash to error? - tracing::info!(?error, ?block_height, "submit block failed"); + tracing::info!(?error, ?block_hash, ?block_height, "submit block failed verification"); error } @@ -803,7 +808,7 @@ where // and return a duplicate error for the newer request immediately. // This improves the speed of the RPC response. // - // Checking the download queues and ChainVerifier buffer for duplicates + // Checking the download queues and BlockVerifierRouter buffer for duplicates // might require architectural changes to Zebra, so we should only do it // if mining pools really need it. Ok(_verify_chain_error) => submit_block::ErrorResponse::Rejected, diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs index 8439808fe70..0e496ad37fa 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/get_block_template.rs @@ -39,8 +39,8 @@ pub use crate::methods::get_block_template_rpcs::types::get_block_template::*; /// Returns an error if there's a mismatch between the mode and whether `data` is provided. pub fn check_parameters(parameters: &Option) -> Result<()> { let Some(parameters) = parameters else { - return Ok(()) - }; + return Ok(()); + }; match parameters { JsonParameters { @@ -97,15 +97,15 @@ pub fn check_miner_address( /// usual acceptance rules (except proof-of-work). /// /// Returns a `getblocktemplate` [`Response`]. 
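`validate_block_proposal` below drives the verifier using tower's usual two-step calling convention: wait for the service to have capacity, then submit exactly one request. A minimal generic sketch of that convention, with stand-in bounds rather than Zebra's exact ones:

```rust
use tower::{Service, ServiceExt};

type BoxError = Box<dyn std::error::Error + Send + Sync>;

/// Sends one request to a verifier service, waiting for capacity first.
async fn check_proposal<S, Req, Res>(mut verifier: S, request: Req) -> Result<Res, BoxError>
where
    S: Service<Req, Response = Res, Error = BoxError>,
{
    verifier
        .ready() // wait until the service can accept a request
        .await?
        .call(request) // then submit exactly one request
        .await
}
```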
-pub async fn validate_block_proposal( - mut chain_verifier: ChainVerifier, +pub async fn validate_block_proposal( + mut block_verifier_router: BlockVerifierRouter, block_proposal_bytes: Vec, network: Network, latest_chain_tip: Tip, sync_status: SyncStatus, ) -> Result where - ChainVerifier: Service + BlockVerifierRouter: Service + Clone + Send + Sync @@ -129,7 +129,7 @@ where } }; - let chain_verifier_response = chain_verifier + let block_verifier_router_response = block_verifier_router .ready() .await .map_err(|error| Error { @@ -140,12 +140,12 @@ where .call(zebra_consensus::Request::CheckProposal(Arc::new(block))) .await; - Ok(chain_verifier_response + Ok(block_verifier_router_response .map(|_hash| ProposalResponse::Valid) .unwrap_or_else(|verify_chain_error| { tracing::info!( ?verify_chain_error, - "error response from chain_verifier in CheckProposal request" + "error response from block_verifier_router in CheckProposal request" ); ProposalResponse::rejected("invalid proposal", verify_chain_error) @@ -267,7 +267,8 @@ where let mempool::Response::FullTransactions { transactions, last_seen_tip_hash, - } = response else { + } = response + else { unreachable!("unmatched response to a mempool::FullTransactions request") }; diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs index df85939e4ff..617b80080c2 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_block_template.rs @@ -1,6 +1,8 @@ //! The `GetBlockTempate` type is the output of the `getblocktemplate` RPC method in the //! default 'template' mode. See [`ProposalResponse`] for the output in 'proposal' mode. +use std::fmt; + use zebra_chain::{ amount, block::{ChainHistoryBlockTxAuthCommitmentHash, MAX_BLOCK_BYTES, ZCASH_BLOCK_VERSION}, @@ -34,7 +36,7 @@ pub use parameters::{GetBlockTemplateCapability, GetBlockTemplateRequestMode, Js pub use proposal::{proposal_block_from_template, ProposalResponse}; /// A serialized `getblocktemplate` RPC response in template mode. -#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +#[derive(Clone, Eq, PartialEq, serde::Serialize, serde::Deserialize)] pub struct GetBlockTemplate { /// The getblocktemplate RPC capabilities supported by Zebra. /// @@ -167,6 +169,43 @@ pub struct GetBlockTemplate { pub submit_old: Option, } +impl fmt::Debug for GetBlockTemplate { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // A block with a lot of transactions can be extremely long in logs. 
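The `splice` in the Debug impl below keeps the first three and the last transaction, dropping everything in between. The same operation on a plain `Vec<u32>`, as a standalone demonstration:

```rust
fn main() {
    let mut items: Vec<u32> = (0..10).collect();

    if items.len() > 4 {
        let end = items.len() - 2;
        // Removes indices 3..=8, keeping the first three items and the last one.
        items.splice(3..=end, Vec::new());
    }

    assert_eq!(items, vec![0, 1, 2, 9]);
}
```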
+ let mut transactions_truncated = self.transactions.clone(); + if self.transactions.len() > 4 { + // Remove transaction 3 onwards, but leave the last transaction + let end = self.transactions.len() - 2; + transactions_truncated.splice(3..=end, Vec::new()); + } + + f.debug_struct("GetBlockTemplate") + .field("capabilities", &self.capabilities) + .field("version", &self.version) + .field("previous_block_hash", &self.previous_block_hash) + .field("block_commitments_hash", &self.block_commitments_hash) + .field("light_client_root_hash", &self.light_client_root_hash) + .field("final_sapling_root_hash", &self.final_sapling_root_hash) + .field("default_roots", &self.default_roots) + .field("transaction_count", &self.transactions.len()) + .field("transactions", &transactions_truncated) + .field("coinbase_txn", &self.coinbase_txn) + .field("long_poll_id", &self.long_poll_id) + .field("target", &self.target) + .field("min_time", &self.min_time) + .field("mutable", &self.mutable) + .field("nonce_range", &self.nonce_range) + .field("sigop_limit", &self.sigop_limit) + .field("size_limit", &self.size_limit) + .field("cur_time", &self.cur_time) + .field("bits", &self.bits) + .field("height", &self.height) + .field("max_time", &self.max_time) + .field("submit_old", &self.submit_old) + .finish() + } +} + impl GetBlockTemplate { /// Returns a `Vec` of capabilities supported by the `getblocktemplate` RPC pub fn capabilities() -> Vec { diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs index 3ac548596cb..3da55de3fa7 100644 --- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs +++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs @@ -25,7 +25,7 @@ impl Response { networksolps, networkhashps: networksolps, chain: network.bip70_network_name(), - testnet: network == Network::Testnet, + testnet: network.is_a_test_network(), } } } diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs index 9d2e6610b7d..154f5d8c973 100644 --- a/zebra-rpc/src/methods/tests/prop.rs +++ b/zebra-rpc/src/methods/tests/prop.rs @@ -40,6 +40,7 @@ proptest! { let mut mempool = MockService::build().for_prop_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -76,7 +77,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -94,6 +95,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -137,7 +139,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -153,6 +155,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -197,7 +200,7 @@ proptest! 
{ // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -220,6 +223,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -251,7 +255,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -276,6 +280,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -307,7 +312,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -330,6 +335,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -405,7 +411,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -430,6 +436,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -461,7 +468,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -488,6 +495,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -519,7 +527,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -535,6 +543,7 @@ proptest! { // look for an error with a `NoChainTip` let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", network, false, @@ -552,7 +561,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); runtime.block_on(async move { mempool.expect_no_requests().await?; @@ -585,6 +594,7 @@ proptest! 
{ // Start RPC with the mocked `ChainTip` let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", network, false, @@ -631,7 +641,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); // check no requests were made during this test runtime.block_on(async move { @@ -671,6 +681,7 @@ proptest! { // Start RPC with the mocked `ChainTip` runtime.block_on(async move { let (rpc, _rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", network, false, @@ -734,6 +745,7 @@ proptest! { // Start RPC with the mocked `ChainTip` runtime.block_on(async move { let (rpc, _rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", network, false, @@ -785,6 +797,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -855,7 +868,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; @@ -874,6 +887,7 @@ proptest! { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_prop_tests(); let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -955,7 +969,7 @@ proptest! { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - prop_assert!(matches!(rpc_tx_queue_task_result, None)); + prop_assert!(rpc_tx_queue_task_result.is_none()); Ok::<_, TestCaseError>(()) })?; diff --git a/zebra-rpc/src/methods/tests/snapshot.rs b/zebra-rpc/src/methods/tests/snapshot.rs index 84af5c0883e..a3fa80dc31c 100644 --- a/zebra-rpc/src/methods/tests/snapshot.rs +++ b/zebra-rpc/src/methods/tests/snapshot.rs @@ -72,6 +72,7 @@ async fn test_rpc_response_data_for_network(network: Network) { // Init RPC let (rpc, _rpc_tx_queue_task_handle) = RpcImpl::new( "RPC test", + "/Zebra:RPC test/", network, false, true, diff --git a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs index 5b8155f209d..ab57b7b1e10 100644 --- a/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs +++ b/zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs @@ -5,7 +5,10 @@ //! cargo insta test --review --features getblocktemplate-rpcs --delete-unreferenced-snapshots //! 
``` -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + time::Instant, +}; use hex::FromHex; use insta::Settings; @@ -82,11 +85,11 @@ pub async fn test_responses( >::Future: Send, { let ( - chain_verifier, + block_verifier_router, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), network, state.clone(), @@ -133,8 +136,7 @@ pub async fn test_responses( ) .into(), ) - .into_new_meta_addr() - .unwrap()]); + .into_new_meta_addr(Instant::now(), DateTime32::now())]); // get an rpc instance with continuous blockchain state let get_block_template_rpc = GetBlockTemplateRpcImpl::new( @@ -143,7 +145,7 @@ pub async fn test_responses( Buffer::new(mempool.clone(), 1), read_state, mock_chain_tip.clone(), - chain_verifier.clone(), + block_verifier_router.clone(), mock_sync_status.clone(), mock_address_book, ); @@ -265,7 +267,7 @@ pub async fn test_responses( Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip.clone(), - chain_verifier, + block_verifier_router, mock_sync_status.clone(), MockAddressBookPeers::default(), ); @@ -284,10 +286,11 @@ pub async fn test_responses( mock_read_state_request_handler, ); - let get_block_template::Response::TemplateMode(get_block_template) = get_block_template - .expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") - }; + let get_block_template::Response::TemplateMode(get_block_template) = + get_block_template.expect("unexpected error in getblocktemplate RPC call") + else { + panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") + }; let coinbase_tx: Transaction = get_block_template .coinbase_txn @@ -328,10 +331,11 @@ pub async fn test_responses( mock_read_state_request_handler, ); - let get_block_template::Response::TemplateMode(get_block_template) = get_block_template - .expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") - }; + let get_block_template::Response::TemplateMode(get_block_template) = + get_block_template.expect("unexpected error in getblocktemplate RPC call") + else { + panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") + }; let coinbase_tx: Transaction = get_block_template .coinbase_txn @@ -363,16 +367,16 @@ pub async fn test_responses( snapshot_rpc_getblocktemplate("invalid-proposal", get_block_template, None, &settings); - // the following snapshots use a mock read_state and chain_verifier + // the following snapshots use a mock read_state and block_verifier_router - let mut mock_chain_verifier = MockService::build().for_unit_tests(); + let mut mock_block_verifier_router = MockService::build().for_unit_tests(); let get_block_template_rpc_mock_state_verifier = GetBlockTemplateRpcImpl::new( network, mining_config, Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip, - mock_chain_verifier.clone(), + mock_block_verifier_router.clone(), mock_sync_status, MockAddressBookPeers::default(), ); @@ -385,15 +389,17 @@ pub async fn test_responses( }), ); - let mock_chain_verifier_request_handler = async move { - mock_chain_verifier + let mock_block_verifier_router_request_handler = 
async move { + mock_block_verifier_router .expect_request_that(|req| matches!(req, zebra_consensus::Request::CheckProposal(_))) .await .respond(Hash::from([0; 32])); }; - let (get_block_template, ..) = - tokio::join!(get_block_template_fut, mock_chain_verifier_request_handler,); + let (get_block_template, ..) = tokio::join!( + get_block_template_fut, + mock_block_verifier_router_request_handler, + ); let get_block_template = get_block_template.expect("unexpected error in getblocktemplate RPC call"); diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap index d4d6b540a83..6bed7d59cd2 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@mainnet_10.snap @@ -7,5 +7,6 @@ expression: block "confirmations": 10, "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap index 393f918ebef..fe2c9527562 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_1@testnet_10.snap @@ -7,5 +7,6 @@ expression: block "confirmations": 10, "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap index d4d6b540a83..6bed7d59cd2 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@mainnet_10.snap @@ -7,5 +7,6 @@ expression: block "confirmations": 10, "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap index 393f918ebef..fe2c9527562 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_hash_verbosity_default@testnet_10.snap @@ -7,5 +7,6 @@ expression: block "confirmations": 10, "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap index ad487d39140..3d66b2dffa2 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@mainnet_10.snap @@ -8,5 +8,6 @@ expression: block "height": 1, "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap 
b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap index 02469914e6d..f79a4283b50 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_1@testnet_10.snap @@ -8,5 +8,6 @@ expression: block "height": 1, "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap index ad487d39140..3d66b2dffa2 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@mainnet_10.snap @@ -8,5 +8,6 @@ expression: block "height": 1, "tx": [ "851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap index 02469914e6d..f79a4283b50 100644 --- a/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshots/get_block_verbose_height_verbosity_default@testnet_10.snap @@ -8,5 +8,6 @@ expression: block "height": 1, "tx": [ "f37e9f691fffb635de0999491d906ee85ba40cd36dae9f6e5911a8277d7c5f75" - ] + ], + "trees": {} } diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 02ccd9bc36b..6490b8c88dc 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -29,6 +29,7 @@ async fn rpc_getinfo() { let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "RPC test", + "/Zebra:RPC test/", Mainnet, false, true, @@ -52,7 +53,7 @@ async fn rpc_getinfo() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -72,6 +73,7 @@ async fn rpc_getblock() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -119,6 +121,11 @@ async fn rpc_getblock() { assert_eq!(get_block, expected_result); } + // Create empty note commitment tree information. 
+ let sapling = SaplingTrees { size: 0 }; + let orchard = OrchardTrees { size: 0 }; + let trees = GetBlockTrees { sapling, orchard }; + // Make height calls with verbosity=1 and check response for (i, block) in blocks.iter().enumerate() { let get_block = rpc @@ -137,6 +144,7 @@ async fn rpc_getblock() { .iter() .map(|tx| tx.hash().encode_hex()) .collect(), + trees, } ); } @@ -159,6 +167,7 @@ async fn rpc_getblock() { .iter() .map(|tx| tx.hash().encode_hex()) .collect(), + trees, } ); } @@ -181,6 +190,7 @@ async fn rpc_getblock() { .iter() .map(|tx| tx.hash().encode_hex()) .collect(), + trees, } ); } @@ -203,6 +213,7 @@ async fn rpc_getblock() { .iter() .map(|tx| tx.hash().encode_hex()) .collect(), + trees, } ); } @@ -211,7 +222,7 @@ async fn rpc_getblock() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -223,6 +234,7 @@ async fn rpc_getblock_parse_error() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -253,7 +265,7 @@ async fn rpc_getblock_parse_error() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -265,6 +277,7 @@ async fn rpc_getblock_missing_error() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -307,7 +320,7 @@ async fn rpc_getblock_missing_error() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -333,6 +346,7 @@ async fn rpc_getbestblockhash() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -355,7 +369,7 @@ async fn rpc_getbestblockhash() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -378,6 +392,7 @@ async fn rpc_getrawtransaction() { // Init RPC let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -451,7 +466,12 @@ async fn rpc_getrawtransaction() { } let (response, _) = futures::join!(get_tx_verbose_1_req, make_mempool_req(tx_hash)); - let GetRawTransaction::Object { hex, height, confirmations } = response.expect("We should have a GetRawTransaction struct") else { + let GetRawTransaction::Object { + hex, + height, + confirmations, + } = response.expect("We should have a GetRawTransaction struct") + else { unreachable!("Should return a Raw enum") }; @@ -519,7 +539,7 @@ async fn rpc_getrawtransaction() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -539,6 +559,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { 
zebra_state::populated_state(blocks.clone(), Mainnet).await; let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -625,7 +646,7 @@ async fn rpc_getaddresstxids_invalid_arguments() { // The queue task should continue without errors or panics let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -682,6 +703,7 @@ async fn rpc_getaddresstxids_response_with( zebra_state::populated_state(blocks.to_owned(), network).await; let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( + "RPC test", "RPC test", network, false, @@ -733,6 +755,7 @@ async fn rpc_getaddressutxos_invalid_arguments() { let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let rpc = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -781,6 +804,7 @@ async fn rpc_getaddressutxos_response() { zebra_state::populated_state(blocks.clone(), Mainnet).await; let rpc = RpcImpl::new( + "RPC test", "RPC test", Mainnet, false, @@ -830,11 +854,11 @@ async fn rpc_getblockcount() { zebra_state::populated_state(blocks.clone(), Mainnet).await; let ( - chain_verifier, + block_verifier_router, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), Mainnet, state.clone(), @@ -849,7 +873,7 @@ async fn rpc_getblockcount() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - chain_verifier, + block_verifier_router, MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -880,11 +904,11 @@ async fn rpc_getblockcount_empty_state() { zebra_state::init_test_services(Mainnet); let ( - chain_verifier, + block_verifier_router, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), Mainnet, state.clone(), @@ -899,7 +923,7 @@ async fn rpc_getblockcount_empty_state() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - chain_verifier, + block_verifier_router, MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -932,11 +956,11 @@ async fn rpc_getpeerinfo() { zebra_state::init_test_services(Mainnet); let ( - chain_verifier, + block_verifier_router, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), network, state.clone(), @@ -951,8 +975,10 @@ async fn rpc_getpeerinfo() { ) .into(), ) - .into_new_meta_addr() - .unwrap(); + .into_new_meta_addr( + std::time::Instant::now(), + zebra_chain::serialization::DateTime32::now(), + ); let mock_address_book = MockAddressBookPeers::new(vec![mock_peer_address]); @@ -963,7 +989,7 @@ async fn rpc_getpeerinfo() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - chain_verifier, + block_verifier_router, MockSyncStatus::default(), mock_address_book, ); @@ -1005,11 +1031,11 @@ async fn rpc_getblockhash() { zebra_state::populated_state(blocks.clone(), Mainnet).await; let ( - chain_verifier, + block_verifier_router, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), 
Mainnet, state.clone(), @@ -1024,7 +1050,7 @@ async fn rpc_getblockhash() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - tower::ServiceBuilder::new().service(chain_verifier), + tower::ServiceBuilder::new().service(block_verifier_router), MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -1168,7 +1194,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { block::{Hash, MAX_BLOCK_BYTES, ZCASH_BLOCK_VERSION}, chain_sync_status::MockSyncStatus, serialization::DateTime32, - transaction::VerifiedUnminedTx, + transaction::{zip317, VerifiedUnminedTx}, work::difficulty::{CompactDifficulty, ExpandedDifficulty, U256}, }; use zebra_consensus::MAX_BLOCK_SIGOPS; @@ -1193,7 +1219,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let read_state = MockService::build().for_unit_tests(); - let chain_verifier = MockService::build().for_unit_tests(); + let block_verifier_router = MockService::build().for_unit_tests(); let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); @@ -1234,7 +1260,7 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip, - chain_verifier, + block_verifier_router, mock_sync_status.clone(), MockAddressBookPeers::default(), ); @@ -1279,10 +1305,11 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { make_mock_read_state_request_handler(), ); - let get_block_template::Response::TemplateMode(get_block_template) = get_block_template - .expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") - }; + let get_block_template::Response::TemplateMode(get_block_template) = + get_block_template.expect("unexpected error in getblocktemplate RPC call") + else { + panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") + }; assert_eq!( get_block_template.capabilities, @@ -1423,10 +1450,13 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { conventional_fee: 0.try_into().unwrap(), }; + let conventional_actions = zip317::conventional_actions(&unmined_tx.transaction); + let verified_unmined_tx = VerifiedUnminedTx { transaction: unmined_tx, miner_fee: 0.try_into().unwrap(), legacy_sigop_count: 0, + conventional_actions, unpaid_actions: 0, fee_weight_ratio: 1.0, }; @@ -1444,10 +1474,11 @@ async fn rpc_getblocktemplate_mining_address(use_p2pkh: bool) { make_mock_read_state_request_handler(), ); - let get_block_template::Response::TemplateMode(get_block_template) = get_block_template - .expect("unexpected error in getblocktemplate RPC call") else { - panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") - }; + let get_block_template::Response::TemplateMode(get_block_template) = + get_block_template.expect("unexpected error in getblocktemplate RPC call") + else { + panic!("this getblocktemplate call without parameters should return the `TemplateMode` variant of the response") + }; // mempool transactions should be omitted if the tip hash in the GetChainInfo response from the state // does not match the `last_seen_tip_hash` in the FullTransactions response from the mempool. 
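A sketch of the mempool/state consistency rule described in the comment just above, with stand-in types; the real code uses `fetch_mempool_transactions` and the long poll ID shown earlier in this diff, so the shape here is illustrative.

```rust
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct BlockHash([u8; 32]);

struct MempoolSnapshot {
    /// Stand-in for the verified unmined transactions in the mempool.
    transactions: Vec<String>,
    /// The chain tip the mempool saw when the snapshot was taken.
    last_seen_tip_hash: BlockHash,
}

/// Returns the mempool transactions for the template, `Some(vec![])` to omit
/// them, or `None` when a long-polling caller should retry with fresh
/// state and mempool requests.
fn template_transactions(
    state_tip_hash: BlockHash,
    mempool: MempoolSnapshot,
    is_long_polling: bool,
) -> Option<Vec<String>> {
    if mempool.last_seen_tip_hash == state_tip_hash {
        // Consistent snapshots: use the mempool transactions.
        Some(mempool.transactions)
    } else if is_long_polling {
        // Out of sync while long polling: retry with fresh requests.
        None
    } else {
        // Out of sync, not long polling: omit mempool transactions.
        Some(Vec::new())
    }
}

fn main() {
    let tip = BlockHash([0; 32]);
    let stale = BlockHash([1; 32]);
    let mempool = MempoolSnapshot {
        transactions: vec!["tx1".into()],
        last_seen_tip_hash: stale,
    };

    // Out of sync and not long polling: the template omits mempool transactions.
    assert_eq!(template_transactions(tip, mempool, false), Some(Vec::new()));
}
```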
@@ -1479,11 +1510,11 @@ async fn rpc_submitblock_errors() { // Init RPCs let ( - chain_verifier, + block_verifier_router, _transaction_verifier, _parameter_download_task_handle, _max_checkpoint_height, - ) = zebra_consensus::chain::init( + ) = zebra_consensus::router::init( zebra_consensus::Config::default(), Mainnet, state.clone(), @@ -1498,7 +1529,7 @@ async fn rpc_submitblock_errors() { Buffer::new(mempool.clone(), 1), read_state, latest_chain_tip.clone(), - chain_verifier, + block_verifier_router, MockSyncStatus::default(), MockAddressBookPeers::default(), ); @@ -1646,7 +1677,7 @@ async fn rpc_getdifficulty() { let mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let read_state = MockService::build().for_unit_tests(); - let chain_verifier = MockService::build().for_unit_tests(); + let block_verifier_router = MockService::build().for_unit_tests(); let mut mock_sync_status = MockSyncStatus::default(); mock_sync_status.set_is_close_to_tip(true); @@ -1681,7 +1712,7 @@ async fn rpc_getdifficulty() { Buffer::new(mempool.clone(), 1), read_state.clone(), mock_chain_tip, - chain_verifier, + block_verifier_router, mock_sync_status.clone(), MockAddressBookPeers::default(), ); diff --git a/zebra-rpc/src/queue/tests/prop.rs b/zebra-rpc/src/queue/tests/prop.rs index c250af68e90..1db9a340f2e 100644 --- a/zebra-rpc/src/queue/tests/prop.rs +++ b/zebra-rpc/src/queue/tests/prop.rs @@ -277,7 +277,7 @@ proptest! { block.transactions.push(Arc::new(transaction.clone())); // commit the created block - let request = zebra_state::Request::CommitFinalizedBlock(zebra_state::FinalizedBlock::from(Arc::new(block.clone()))); + let request = zebra_state::Request::CommitCheckpointVerifiedBlock(zebra_state::CheckpointVerifiedBlock::from(Arc::new(block.clone()))); let send_task = tokio::spawn(write_state.clone().oneshot(request.clone())); let response = zebra_state::Response::Committed(block.hash()); diff --git a/zebra-rpc/src/server.rs b/zebra-rpc/src/server.rs index 0a52a1ecabb..d1bdd7ed920 100644 --- a/zebra-rpc/src/server.rs +++ b/zebra-rpc/src/server.rs @@ -43,9 +43,16 @@ mod tests; /// Zebra RPC Server #[derive(Clone)] pub struct RpcServer { + /// The RPC config. config: Config, + + /// The configured network. network: Network, - app_version: String, + + /// Zebra's application version, with build metadata. + build_version: String, + + /// A handle that shuts down the RPC server. close_handle: CloseHandle, } @@ -54,7 +61,7 @@ impl fmt::Debug for RpcServer { f.debug_struct("RpcServer") .field("config", &self.config) .field("network", &self.network) - .field("app_version", &self.app_version) + .field("build_version", &self.build_version) .field( "close_handle", // TODO: when it stabilises, use std::any::type_name_of_val(&self.close_handle) @@ -66,25 +73,39 @@ impl fmt::Debug for RpcServer { impl RpcServer { /// Start a new RPC server endpoint using the supplied configs and services. - /// `app_version` is a version string for the application, which is used in RPC responses. + /// + /// `build_version` and `user_agent` are version strings for the application, + /// which are used in RPC responses. /// /// Returns [`JoinHandle`]s for the RPC server and `sendrawtransaction` queue tasks, /// and a [`RpcServer`] handle, which can be used to shut down the RPC server task. // - // TODO: put some of the configs or services in their own struct? + // TODO: + // - put some of the configs or services in their own struct? 
+ // - replace VersionString with semver::Version, and update the tests to provide valid versions #[allow(clippy::too_many_arguments)] - pub fn spawn( + pub fn spawn< + VersionString, + UserAgentString, + Mempool, + State, + Tip, + BlockVerifierRouter, + SyncStatus, + AddressBook, + >( config: Config, #[cfg(feature = "getblocktemplate-rpcs")] mining_config: get_block_template_rpcs::config::Config, #[cfg(not(feature = "getblocktemplate-rpcs"))] #[allow(unused_variables)] mining_config: (), - app_version: Version, + build_version: VersionString, + user_agent: UserAgentString, mempool: Buffer, state: State, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] - chain_verifier: ChainVerifier, + block_verifier_router: BlockVerifierRouter, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] sync_status: SyncStatus, #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))] @@ -93,7 +114,8 @@ impl RpcServer { network: Network, ) -> (JoinHandle<()>, JoinHandle<()>, Option) where - Version: ToString + Clone + Send + 'static, + VersionString: ToString + Clone + Send + 'static, + UserAgentString: ToString + Clone + Send + 'static, Mempool: tower::Service< mempool::Request, Response = mempool::Response, @@ -110,7 +132,7 @@ impl RpcServer { + 'static, State::Future: Send, Tip: ChainTip + Clone + Send + Sync + 'static, - ChainVerifier: Service< + BlockVerifierRouter: Service< zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError, @@ -118,7 +140,7 @@ impl RpcServer { + Send + Sync + 'static, - >::Future: Send, + >::Future: Send, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, AddressBook: AddressBookPeers + Clone + Send + Sync + 'static, { @@ -149,7 +171,7 @@ impl RpcServer { mempool.clone(), state.clone(), latest_chain_tip.clone(), - chain_verifier, + block_verifier_router, sync_status, address_book, ); @@ -159,7 +181,8 @@ impl RpcServer { // Initialize the rpc methods with the zebra version let (rpc_impl, rpc_tx_queue_task_handle) = RpcImpl::new( - app_version.clone(), + build_version.clone(), + user_agent, network, config.debug_force_finished_sync, #[cfg(feature = "getblocktemplate-rpcs")] @@ -202,7 +225,7 @@ impl RpcServer { let rpc_server_handle = RpcServer { config, network, - app_version: app_version.to_string(), + build_version: build_version.to_string(), close_handle, }; diff --git a/zebra-rpc/src/server/http_request_compatibility.rs b/zebra-rpc/src/server/http_request_compatibility.rs index 63f445e917b..99e604843fb 100644 --- a/zebra-rpc/src/server/http_request_compatibility.rs +++ b/zebra-rpc/src/server/http_request_compatibility.rs @@ -43,8 +43,8 @@ impl RequestMiddleware for FixHttpRequestMiddleware { ) -> jsonrpc_http_server::RequestMiddlewareAction { tracing::trace!(?request, "original HTTP request"); - // Fix the request headers - FixHttpRequestMiddleware::add_missing_content_type_header(request.headers_mut()); + // Fix the request headers if needed and we can do so. + FixHttpRequestMiddleware::insert_or_replace_content_type_header(request.headers_mut()); // Fix the request body let request = request.map(|body| { @@ -103,11 +103,44 @@ impl FixHttpRequestMiddleware { .replace(", \"jsonrpc\": \"1.0\"", "") } - /// If the `content-type` HTTP header is not present, - /// add an `application/json` content type header. 
-    pub fn add_missing_content_type_header(headers: &mut hyper::header::HeaderMap) {
-        headers
-            .entry(hyper::header::CONTENT_TYPE)
-            .or_insert(hyper::header::HeaderValue::from_static("application/json"));
+    /// Inserts or replaces the client-supplied `content-type` HTTP header with `application/json` in the following cases:
+    ///
+    /// - no `content-type` is supplied.
+    /// - the supplied `content-type` starts with `text/plain`, for example:
+    ///   - `text/plain`
+    ///   - `text/plain;`
+    ///   - `text/plain; charset=utf-8`
+    ///
+    /// `application/json` is the only `content-type` accepted by the Zebra RPC endpoint:
+    ///
+    ///
+    ///
+    /// # Security
+    ///
+    /// - `content-type` headers exist so that applications know they are speaking the correct protocol with the correct format.
+    ///   We can be a bit flexible, but there are some types (such as binary) we shouldn't allow.
+    ///   In particular, the "application/x-www-form-urlencoded" header should be rejected, so browser forms can't be used to attack
+    ///   a local RPC port. See "The Role of Routers in the CSRF Attack" in
+    ///
+    /// - Checking all the headers is secure, but only because hyper has custom code that just reads the first content-type header.
+    ///
+    pub fn insert_or_replace_content_type_header(headers: &mut hyper::header::HeaderMap) {
+        if !headers.contains_key(hyper::header::CONTENT_TYPE)
+            || headers
+                .get(hyper::header::CONTENT_TYPE)
+                .filter(|value| {
+                    value
+                        .to_str()
+                        .ok()
+                        .unwrap_or_default()
+                        .starts_with("text/plain")
+                })
+                .is_some()
+        {
+            headers.insert(
+                hyper::header::CONTENT_TYPE,
+                hyper::header::HeaderValue::from_static("application/json"),
+            );
+        }
     }
 }
diff --git a/zebra-rpc/src/server/rpc_call_compatibility.rs b/zebra-rpc/src/server/rpc_call_compatibility.rs
index e6f44c1eb98..c3974ac3cf8 100644
--- a/zebra-rpc/src/server/rpc_call_compatibility.rs
+++ b/zebra-rpc/src/server/rpc_call_compatibility.rs
@@ -12,7 +12,7 @@ use jsonrpc_core::{
     BoxFuture, ErrorCode, Metadata, MethodCall, Notification,
 };
 
-use crate::constants::INVALID_PARAMETERS_ERROR_CODE;
+use crate::constants::{INVALID_PARAMETERS_ERROR_CODE, MAX_PARAMS_LOG_LENGTH};
 
 /// JSON-RPC [`Middleware`] with compatibility workarounds.
 ///
@@ -75,10 +75,22 @@ impl FixRpcResponseMiddleware {
     fn call_description(call: &Call) -> String {
         match call {
             Call::MethodCall(MethodCall { method, params, .. }) => {
-                format!(r#"method = {method:?}, params = {params:?}"#)
+                let mut params = format!("{params:?}");
+                if params.len() >= MAX_PARAMS_LOG_LENGTH {
+                    params.truncate(MAX_PARAMS_LOG_LENGTH);
+                    params.push_str("...");
+                }
+
+                format!(r#"method = {method:?}, params = {params}"#)
             }
             Call::Notification(Notification { method, params, .. }) => {
-                format!(r#"notification = {method:?}, params = {params:?}"#)
+                let mut params = format!("{params:?}");
+                if params.len() >= MAX_PARAMS_LOG_LENGTH {
+                    params.truncate(MAX_PARAMS_LOG_LENGTH);
+                    params.push_str("...");
+                }
+
+                format!(r#"notification = {method:?}, params = {params}"#)
             }
             Call::Invalid { .. } => "invalid request".to_owned(),
         }
diff --git a/zebra-rpc/src/server/tests/vectors.rs b/zebra-rpc/src/server/tests/vectors.rs
index ad7bb8b050a..2622b84ba86 100644
--- a/zebra-rpc/src/server/tests/vectors.rs
+++ b/zebra-rpc/src/server/tests/vectors.rs
@@ -1,5 +1,8 @@
 //! Fixed test vectors for the RPC server.
 
+// These tests call functions which can take unit arguments if some features aren't enabled.
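// A minimal sketch of the header normalization above, assuming the new
// `FixHttpRequestMiddleware::insert_or_replace_content_type_header` is in scope;
// `hyper::header` re-exports the `http` crate's header types.
#[test]
fn content_type_normalization_sketch() {
    use hyper::header::{HeaderMap, HeaderValue, CONTENT_TYPE};

    // A `text/plain` variant is replaced with `application/json`.
    let mut headers = HeaderMap::new();
    headers.insert(CONTENT_TYPE, HeaderValue::from_static("text/plain; charset=utf-8"));
    FixHttpRequestMiddleware::insert_or_replace_content_type_header(&mut headers);
    assert_eq!(
        headers.get(CONTENT_TYPE),
        Some(&HeaderValue::from_static("application/json"))
    );

    // A missing `content-type` header is inserted.
    let mut headers = HeaderMap::new();
    FixHttpRequestMiddleware::insert_or_replace_content_type_header(&mut headers);
    assert_eq!(
        headers.get(CONTENT_TYPE),
        Some(&HeaderValue::from_static("application/json"))
    );

    // Other types, like `application/x-www-form-urlencoded`, are left unchanged,
    // so the endpoint can reject them (see the CSRF note above).
    let mut headers = HeaderMap::new();
    headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/x-www-form-urlencoded"));
    FixHttpRequestMiddleware::insert_or_replace_content_type_header(&mut headers);
    assert_eq!(
        headers.get(CONTENT_TYPE),
        Some(&HeaderValue::from_static("application/x-www-form-urlencoded"))
    );
}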
+#![allow(clippy::unit_arg)] + use std::{ net::{Ipv4Addr, SocketAddrV4}, time::Duration, @@ -49,7 +52,7 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) { rt.block_on(async { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut chain_verifier: MockService<_, _, _, BoxError> = + let mut block_verifier_router: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); info!("spawning RPC server..."); @@ -58,9 +61,10 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) { config, Default::default(), "RPC server test", + "RPC server test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(chain_verifier.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -71,14 +75,14 @@ fn rpc_server_spawn(parallel_cpu_threads: bool) { mempool.expect_no_requests().await; state.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // The server and queue tasks should continue without errors or panics let rpc_server_task_result = rpc_server_task_handle.now_or_never(); - assert!(matches!(rpc_server_task_result, None)); + assert!(rpc_server_task_result.is_none()); let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); }); info!("waiting for RPC server to shut down..."); @@ -135,7 +139,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo rt.block_on(async { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - let mut chain_verifier: MockService<_, _, _, BoxError> = + let mut block_verifier_router: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); info!("spawning RPC server..."); @@ -144,9 +148,10 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo config, Default::default(), "RPC server test", + "RPC server test", Buffer::new(mempool.clone(), 1), Buffer::new(state.clone(), 1), - Buffer::new(chain_verifier.clone(), 1), + Buffer::new(block_verifier_router.clone(), 1), MockSyncStatus::default(), MockAddressBookPeers::default(), NoChainTip, @@ -157,7 +162,7 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo mempool.expect_no_requests().await; state.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; if do_shutdown { rpc_server @@ -181,10 +186,10 @@ fn rpc_server_spawn_unallocated_port(parallel_cpu_threads: bool, do_shutdown: bo } else { // The server and queue tasks should continue without errors or panics let rpc_server_task_result = rpc_server_task_handle.now_or_never(); - assert!(matches!(rpc_server_task_result, None)); + assert!(rpc_server_task_result.is_none()); let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never(); - assert!(matches!(rpc_tx_queue_task_result, None)); + assert!(rpc_tx_queue_task_result.is_none()); } }); @@ -214,7 +219,7 @@ fn rpc_server_spawn_port_conflict() { let test_task_handle = rt.spawn(async { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); let mut state: MockService<_, _, _, 
BoxError> = MockService::build().for_unit_tests();
-        let mut chain_verifier: MockService<_, _, _, BoxError> =
+        let mut block_verifier_router: MockService<_, _, _, BoxError> =
             MockService::build().for_unit_tests();
 
         info!("spawning RPC server 1...");
@@ -224,9 +229,10 @@ fn rpc_server_spawn_port_conflict() {
             config.clone(),
             Default::default(),
             "RPC server 1 test",
+            "RPC server 1 test",
             Buffer::new(mempool.clone(), 1),
             Buffer::new(state.clone(), 1),
-            Buffer::new(chain_verifier.clone(), 1),
+            Buffer::new(block_verifier_router.clone(), 1),
             MockSyncStatus::default(),
             MockAddressBookPeers::default(),
             NoChainTip,
@@ -241,9 +247,10 @@ fn rpc_server_spawn_port_conflict() {
             config,
             Default::default(),
             "RPC server 2 conflict test",
+            "RPC server 2 conflict test",
             Buffer::new(mempool.clone(), 1),
             Buffer::new(state.clone(), 1),
-            Buffer::new(chain_verifier.clone(), 1),
+            Buffer::new(block_verifier_router.clone(), 1),
             MockSyncStatus::default(),
             MockAddressBookPeers::default(),
             NoChainTip,
@@ -254,7 +261,7 @@ fn rpc_server_spawn_port_conflict() {
         mempool.expect_no_requests().await;
         state.expect_no_requests().await;
-        chain_verifier.expect_no_requests().await;
+        block_verifier_router.expect_no_requests().await;
 
         // Because there is a panic inside a multi-threaded executor,
         // we can't depend on the exact behaviour of the other tasks,
@@ -322,7 +329,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() {
     let test_task_handle = rt.spawn(async {
         let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
         let mut state: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests();
-        let mut chain_verifier: MockService<_, _, _, BoxError> =
+        let mut block_verifier_router: MockService<_, _, _, BoxError> =
             MockService::build().for_unit_tests();
 
         info!("spawning parallel RPC server 1...");
@@ -332,9 +339,10 @@ fn rpc_server_spawn_port_conflict_parallel_auto() {
             config.clone(),
             Default::default(),
             "RPC server 1 test",
+            "RPC server 1 test",
             Buffer::new(mempool.clone(), 1),
             Buffer::new(state.clone(), 1),
-            Buffer::new(chain_verifier.clone(), 1),
+            Buffer::new(block_verifier_router.clone(), 1),
             MockSyncStatus::default(),
             MockAddressBookPeers::default(),
             NoChainTip,
@@ -349,9 +357,10 @@ fn rpc_server_spawn_port_conflict_parallel_auto() {
             config,
             Default::default(),
             "RPC server 2 conflict test",
+            "RPC server 2 conflict test",
             Buffer::new(mempool.clone(), 1),
             Buffer::new(state.clone(), 1),
-            Buffer::new(chain_verifier.clone(), 1),
+            Buffer::new(block_verifier_router.clone(), 1),
             MockSyncStatus::default(),
             MockAddressBookPeers::default(),
             NoChainTip,
@@ -362,7 +371,7 @@ fn rpc_server_spawn_port_conflict_parallel_auto() {
         mempool.expect_no_requests().await;
         state.expect_no_requests().await;
-        chain_verifier.expect_no_requests().await;
+        block_verifier_router.expect_no_requests().await;
 
         // Because there might be a panic inside a multi-threaded executor,
         // we can't depend on the exact behaviour of the other tasks,
diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml
index 7dd3582879f..00d59e4a9db 100644
--- a/zebra-script/Cargo.toml
+++ b/zebra-script/Cargo.toml
@@ -1,18 +1,25 @@
 [package]
 name = "zebra-script"
-version = "1.0.0-beta.24"
+version = "1.0.0-beta.28"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
+description = "Zebra script verification wrapping zcashd's zcash_script library"
 license = "MIT OR Apache-2.0"
+repository = "https://github.com/ZcashFoundation/zebra"
 edition = "2021"
 
-# See more keys and their definitions at
https://doc.rust-lang.org/cargo/reference/manifest.html
+readme = "../README.md"
+homepage = "https://zfnd.org/zebra/"
+# crates.io is limited to 5 keywords and categories
+keywords = ["zebra", "zcash"]
+# Must be one of <https://crates.io/category_slugs>
+categories = ["api-bindings", "cryptography::cryptocurrencies"]
 
 [dependencies]
-zcash_script = "0.1.12"
+zcash_script = "0.1.13"
 
-zebra-chain = { path = "../zebra-chain" }
+zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28" }
 
-thiserror = "1.0.40"
+thiserror = "1.0.44"
 displaydoc = "0.2.4"
 
 [dev-dependencies]
diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml
index cf5be35b714..51e929bbcf4 100644
--- a/zebra-state/Cargo.toml
+++ b/zebra-state/Cargo.toml
@@ -1,10 +1,19 @@
 [package]
 name = "zebra-state"
-version = "1.0.0-beta.24"
+version = "1.0.0-beta.28"
 authors = ["Zcash Foundation <zebra@zfnd.org>"]
+description = "State contextual verification and storage code for Zebra"
 license = "MIT OR Apache-2.0"
+repository = "https://github.com/ZcashFoundation/zebra"
 edition = "2021"
+readme = "../README.md"
+homepage = "https://zfnd.org/zebra/"
+# crates.io is limited to 5 keywords and categories
+keywords = ["zebra", "zcash"]
+# Must be one of <https://crates.io/category_slugs>
+categories = ["asynchronous", "caching", "cryptography::cryptocurrencies"]
+
 [features]
 # Production features that activate extra dependencies, or extra features in dependencies
@@ -30,43 +39,46 @@ proptest-impl = [
 elasticsearch = [
     "dep:elasticsearch",
     "dep:serde_json",
+    "zebra-chain/elasticsearch",
 ]
 
 [dependencies]
 bincode = "1.3.3"
-chrono = { version = "0.4.24", default-features = false, features = ["clock", "std"] }
+chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] }
 dirs = "5.0.1"
 futures = "0.3.28"
 hex = "0.4.3"
-indexmap = "1.9.3"
-itertools = "0.10.5"
+indexmap = "2.0.0"
+itertools = "0.11.0"
 lazy_static = "1.4.0"
-metrics = "0.21.0"
+metrics = "0.21.1"
 mset = "0.1.1"
-regex = "1.8.1"
-rlimit = "0.9.1"
-rocksdb = { version = "0.21.0", default_features = false, features = ["lz4"] }
-serde = { version = "1.0.163", features = ["serde_derive"] }
-tempfile = "3.5.0"
-thiserror = "1.0.40"
+regex = "1.9.3"
+rlimit = "0.10.1"
+rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] }
+semver = "1.0.18"
+serde = { version = "1.0.179", features = ["serde_derive"] }
+tempfile = "3.7.1"
+thiserror = "1.0.44"
 
 rayon = "1.7.0"
-tokio = { version = "1.28.0", features = ["sync", "tracing"] }
+tokio = { version = "1.29.1", features = ["rt-multi-thread", "sync", "tracing"] }
 tower = { version = "0.4.13", features = ["buffer", "util"] }
 tracing = "0.1.37"
 
 # elasticsearch specific dependencies.
-elasticsearch = { version = "8.5.0-alpha.1", package = "elasticsearch", optional = true }
-serde_json = { version = "1.0.96", package = "serde_json", optional = true }
+# Security: avoid default dependency on openssl
+elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true }
+serde_json = { version = "1.0.104", package = "serde_json", optional = true }
 
-zebra-chain = { path = "../zebra-chain" }
+zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28", features = ["async-error"] }
 
 # prod feature progress-bar
 howudoin = { version = "0.1.2", optional = true }
 
 # test feature proptest-impl
-zebra-test = { path = "../zebra-test/", optional = true }
-proptest = { version = "1.1.0", optional = true }
+zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.28", optional = true }
+proptest = { version = "1.2.0", optional = true }
 proptest-derive = { version = "0.3.0", optional = true }
 
 [dev-dependencies]
@@ -75,19 +87,20 @@ color-eyre = "0.6.2"
 # Enable a feature that makes tinyvec compile much faster.
 tinyvec = { version = "1.6.0", features = ["rustc_1_55"] }
 
-once_cell = "1.17.1"
+once_cell = "1.18.0"
 spandoc = "0.2.2"
 
 hex = { version = "0.4.3", features = ["serde"] }
-insta = { version = "1.29.0", features = ["ron"] }
+insta = { version = "1.31.0", features = ["ron", "redactions"] }
 
-proptest = "1.1.0"
+proptest = "1.2.0"
 proptest-derive = "0.3.0"
+rand = "0.8.5"
 
 halo2 = { package = "halo2_proofs", version = "0.3.0" }
 jubjub = "0.10.0"
 
-tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] }
+tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] }
 
 zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] }
 zebra-test = { path = "../zebra-test/" }
diff --git a/zebra-state/src/arbitrary.rs b/zebra-state/src/arbitrary.rs
index 849c047bff3..9f87c749c98 100644
--- a/zebra-state/src/arbitrary.rs
+++ b/zebra-state/src/arbitrary.rs
@@ -11,19 +11,19 @@ use zebra_chain::{
 };
 
 use crate::{
-    request::ContextuallyValidBlock, service::chain_tip::ChainTipBlock, FinalizedBlock,
-    PreparedBlock,
+    request::ContextuallyVerifiedBlock, service::chain_tip::ChainTipBlock, CheckpointVerifiedBlock,
+    SemanticallyVerifiedBlock,
 };
 
 /// Mocks computation done during semantic validation
 pub trait Prepare {
     /// Runs block semantic validation computation, and returns the result.
     /// Test-only method.
-    fn prepare(self) -> PreparedBlock;
+    fn prepare(self) -> SemanticallyVerifiedBlock;
 }
 
 impl Prepare for Arc<Block> {
-    fn prepare(self) -> PreparedBlock {
+    fn prepare(self) -> SemanticallyVerifiedBlock {
         let block = self;
         let hash = block.hash();
         let height = block.coinbase_height().unwrap();
@@ -31,7 +31,7 @@ impl Prepare for Arc<Block> {
         let new_outputs =
             transparent::new_ordered_outputs_with_height(&block, height, &transaction_hashes);
 
-        PreparedBlock {
+        SemanticallyVerifiedBlock {
             block,
             hash,
             height,
@@ -50,9 +50,9 @@
 where
 }
 }
 
-impl From<PreparedBlock> for ChainTipBlock {
-    fn from(prepared: PreparedBlock) -> Self {
-        let PreparedBlock {
+impl From<SemanticallyVerifiedBlock> for ChainTipBlock {
+    fn from(prepared: SemanticallyVerifiedBlock) -> Self {
+        let SemanticallyVerifiedBlock {
             block,
             hash,
             height,
@@ -71,17 +71,17 @@ impl From<SemanticallyVerifiedBlock> for ChainTipBlock {
     }
 }
 
-impl PreparedBlock {
-    /// Returns a [`ContextuallyValidBlock`] created from this block,
+impl SemanticallyVerifiedBlock {
+    /// Returns a [`ContextuallyVerifiedBlock`] created from this block,
     /// with fake zero-valued spent UTXOs.
     ///
     /// Only for use in tests.
     #[cfg(test)]
-    pub fn test_with_zero_spent_utxos(&self) -> ContextuallyValidBlock {
-        ContextuallyValidBlock::test_with_zero_spent_utxos(self)
+    pub fn test_with_zero_spent_utxos(&self) -> ContextuallyVerifiedBlock {
+        ContextuallyVerifiedBlock::test_with_zero_spent_utxos(self)
     }
 
-    /// Returns a [`ContextuallyValidBlock`] created from this block,
+    /// Returns a [`ContextuallyVerifiedBlock`] created from this block,
     /// using a fake chain value pool change.
     ///
     /// Only for use in tests.
@@ -89,26 +89,26 @@ impl PreparedBlock {
     pub fn test_with_chain_pool_change(
         &self,
         fake_chain_value_pool_change: ValueBalance<NegativeAllowed>,
-    ) -> ContextuallyValidBlock {
-        ContextuallyValidBlock::test_with_chain_pool_change(self, fake_chain_value_pool_change)
+    ) -> ContextuallyVerifiedBlock {
+        ContextuallyVerifiedBlock::test_with_chain_pool_change(self, fake_chain_value_pool_change)
     }
 
-    /// Returns a [`ContextuallyValidBlock`] created from this block,
+    /// Returns a [`ContextuallyVerifiedBlock`] created from this block,
     /// with no chain value pool change.
     ///
     /// Only for use in tests.
     #[cfg(test)]
-    pub fn test_with_zero_chain_pool_change(&self) -> ContextuallyValidBlock {
-        ContextuallyValidBlock::test_with_zero_chain_pool_change(self)
+    pub fn test_with_zero_chain_pool_change(&self) -> ContextuallyVerifiedBlock {
+        ContextuallyVerifiedBlock::test_with_zero_chain_pool_change(self)
     }
 }
 
-impl ContextuallyValidBlock {
+impl ContextuallyVerifiedBlock {
     /// Create a block that's ready for non-finalized `Chain` contextual
-    /// validation, using a [`PreparedBlock`] and fake zero-valued spent UTXOs.
+    /// validation, using a [`SemanticallyVerifiedBlock`] and fake zero-valued spent UTXOs.
     ///
     /// Only for use in tests.
-    pub fn test_with_zero_spent_utxos(block: impl Into<PreparedBlock>) -> Self {
+    pub fn test_with_zero_spent_utxos(block: impl Into<SemanticallyVerifiedBlock>) -> Self {
         let block = block.into();
 
         let zero_output = transparent::Output {
@@ -128,19 +128,19 @@ impl ContextuallyValidBlock {
             .map(|outpoint| (outpoint, zero_utxo.clone()))
             .collect();
 
-        ContextuallyValidBlock::with_block_and_spent_utxos(block, zero_spent_utxos)
+        ContextuallyVerifiedBlock::with_block_and_spent_utxos(block, zero_spent_utxos)
             .expect("all UTXOs are provided with zero values")
     }
 
-    /// Create a [`ContextuallyValidBlock`] from a [`Block`] or [`PreparedBlock`],
+    /// Create a [`ContextuallyVerifiedBlock`] from a [`Block`] or [`SemanticallyVerifiedBlock`],
     /// using a fake chain value pool change.
     ///
     /// Only for use in tests.
     pub fn test_with_chain_pool_change(
-        block: impl Into<PreparedBlock>,
+        block: impl Into<SemanticallyVerifiedBlock>,
         fake_chain_value_pool_change: ValueBalance<NegativeAllowed>,
     ) -> Self {
-        let PreparedBlock {
+        let SemanticallyVerifiedBlock {
             block,
             hash,
             height,
@@ -162,20 +162,20 @@
         }
     }
 
-    /// Create a [`ContextuallyValidBlock`] from a [`Block`] or [`PreparedBlock`],
+    /// Create a [`ContextuallyVerifiedBlock`] from a [`Block`] or [`SemanticallyVerifiedBlock`],
     /// with no chain value pool change.
     ///
     /// Only for use in tests.
-    pub fn test_with_zero_chain_pool_change(block: impl Into<PreparedBlock>) -> Self {
+    pub fn test_with_zero_chain_pool_change(block: impl Into<SemanticallyVerifiedBlock>) -> Self {
         Self::test_with_chain_pool_change(block, ValueBalance::zero())
     }
 }
 
-impl FinalizedBlock {
+impl CheckpointVerifiedBlock {
     /// Create a block that's ready to be committed to the finalized state,
     /// using a precalculated [`block::Hash`] and [`block::Height`].
     ///
-    /// This is a test-only method, prefer [`FinalizedBlock::with_hash`].
+    /// This is a test-only method, prefer [`CheckpointVerifiedBlock::with_hash`].
     #[cfg(any(test, feature = "proptest-impl"))]
     pub fn with_hash_and_height(
         block: Arc<Block>,
@@ -183,14 +183,15 @@
         height: block::Height,
     ) -> Self {
         let transaction_hashes: Arc<[_]> = block.transactions.iter().map(|tx| tx.hash()).collect();
-        let new_outputs = transparent::new_outputs_with_height(&block, height, &transaction_hashes);
+        let new_outputs =
+            transparent::new_ordered_outputs_with_height(&block, height, &transaction_hashes);
 
-        Self {
+        Self(SemanticallyVerifiedBlock {
             block,
             hash,
             height,
             new_outputs,
             transaction_hashes,
-        }
+        })
     }
 }
diff --git a/zebra-state/src/config.rs b/zebra-state/src/config.rs
index b350f82ac24..69020061ff3 100644
--- a/zebra-state/src/config.rs
+++ b/zebra-state/src/config.rs
@@ -1,39 +1,43 @@
 //! Cached state configuration for Zebra.
 
 use std::{
-    fs::{canonicalize, remove_dir_all, DirEntry, ReadDir},
+    fs::{self, canonicalize, remove_dir_all, DirEntry, ReadDir},
+    io::ErrorKind,
     path::{Path, PathBuf},
 };
 
+use semver::Version;
 use serde::{Deserialize, Serialize};
 use tokio::task::{spawn_blocking, JoinHandle};
 use tracing::Span;
 
 use zebra_chain::parameters::Network;
 
+use crate::{
+    constants::{
+        DATABASE_FORMAT_MINOR_VERSION, DATABASE_FORMAT_PATCH_VERSION, DATABASE_FORMAT_VERSION,
+        DATABASE_FORMAT_VERSION_FILE_NAME,
+    },
+    BoxError,
+};
+
 /// Configuration for the state service.
 #[derive(Clone, Debug, Deserialize, Serialize)]
 #[serde(deny_unknown_fields, default)]
 pub struct Config {
-    /// The root directory for storing cached data.
-    ///
-    /// Cached data includes any state that can be replicated from the network
-    /// (e.g., the chain state, the blocks, the UTXO set, etc.). It does *not*
-    /// include private data that cannot be replicated from the network, such as
-    /// wallet data. That data is not handled by `zebra-state`.
+    /// The root directory for storing cached block data.
     ///
-    /// Each state format version and network has a separate state.
-    /// These states are stored in `state/vN/mainnet` and `state/vN/testnet` subdirectories,
-    /// underneath the `cache_dir` path, where `N` is the state format version.
+    /// If you change this directory, you might also want to change `network.cache_dir`.
     ///
-    /// When Zebra's state format changes, it creates a new state subdirectory for that version,
-    /// and re-syncs from genesis.
+    /// This cache stores permanent blockchain state that can be replicated from
+    /// the network, including the best chain, blocks, the UTXO set, and other indexes.
+    /// Any state that can be rolled back is only stored in memory.
     ///
-    /// Old state versions are [not automatically deleted](https://github.com/ZcashFoundation/zebra/issues/1213).
-    /// It is ok to manually delete old state versions.
+    /// The `zebra-state` cache does *not* include any private data, such as wallet data.
     ///
-    /// It is also ok to delete the entire cached state directory.
-    /// If you do, Zebra will re-sync from genesis next time it is launched.
+    /// You can delete the entire cached state directory, but it will impact your node's
+    /// readiness and network usage. If you do, Zebra will re-sync from genesis the next
+    /// time it is launched.
     ///
     /// The default directory is platform dependent, based on
    /// [`dirs::cache_dir()`](https://docs.rs/dirs/3.0.1/dirs/fn.cache_dir.html):
@@ -51,6 +55,18 @@ pub struct Config {
     /// directory for this file before running Zebra, and make sure the Zebra user
     /// account has exclusive access to that directory, and other users can't modify
     /// its parent directories.
+    ///
+    /// # Implementation Details
+    ///
+    /// Each state format version and network has a separate state.
+    /// These states are stored in `state/vN/mainnet` and `state/vN/testnet` subdirectories,
+    /// underneath the `cache_dir` path, where `N` is the state format version.
+    ///
+    /// When Zebra's state format changes, it creates a new state subdirectory for that version,
+    /// and re-syncs from genesis.
+    ///
+    /// Old state versions are automatically deleted at startup. You can also manually delete old
+    /// state versions.
     pub cache_dir: PathBuf,
 
     /// Whether to use an ephemeral database.
@@ -103,10 +119,7 @@ fn gen_temp_path(prefix: &str) -> PathBuf {
 impl Config {
     /// Returns the path for the finalized state database
     pub fn db_path(&self, network: Network) -> PathBuf {
-        let net_dir = match network {
-            Network::Mainnet => "mainnet",
-            Network::Testnet => "testnet",
-        };
+        let net_dir = network.lowercase_name();
 
         if self.ephemeral {
             gen_temp_path(&format!(
@@ -122,6 +135,15 @@ impl Config {
         }
     }
 
+    /// Returns the path of the database format version file.
+    pub fn version_file_path(&self, network: Network) -> PathBuf {
+        let mut version_path = self.db_path(network);
+
+        version_path.push(DATABASE_FORMAT_VERSION_FILE_NAME);
+
+        version_path
+    }
+
     /// Construct a config for an ephemeral database
     pub fn ephemeral() -> Config {
         Config {
@@ -153,6 +175,7 @@ impl Default for Config {
 }
 
 // Cleaning up old database versions
+// TODO: put this in a different module?
 
 /// Spawns a task that checks if there are old database folders,
 /// and deletes them from the filesystem.
@@ -264,8 +287,96 @@ fn parse_dir_name(entry: &DirEntry) -> Option<String> {
 /// Parse the state version number from `dir_name`.
 ///
 /// Returns `None` if parsing fails, or the directory name is not in the expected format.
-fn parse_version_number(dir_name: &str) -> Option<u32> {
+fn parse_version_number(dir_name: &str) -> Option<u64> {
     dir_name
         .strip_prefix('v')
         .and_then(|version| version.parse().ok())
 }
+
+// TODO: move these to the format upgrade module
+
+/// Returns the full semantic version of the currently running database format code.
+///
+/// This is the version implemented by the Zebra code that's currently running;
+/// the minor and patch versions on disk can be different.
+pub fn database_format_version_in_code() -> Version {
+    Version::new(
+        DATABASE_FORMAT_VERSION,
+        DATABASE_FORMAT_MINOR_VERSION,
+        DATABASE_FORMAT_PATCH_VERSION,
+    )
+}
+
+/// Returns the full semantic version of the on-disk database.
+/// If there is no existing on-disk database, returns `Ok(None)`.
+///
+/// This is the format of the data on disk; the minor and patch versions
+/// implemented by the running Zebra code can be different.
+pub fn database_format_version_on_disk(
+    config: &Config,
+    network: Network,
+) -> Result<Option<Version>, BoxError> {
+    let version_path = config.version_file_path(network);
+
+    let version = match fs::read_to_string(version_path) {
+        Ok(version) => version,
+        Err(e) if e.kind() == ErrorKind::NotFound => {
+            // If the version file doesn't exist, don't guess the version.
+            // (It will end up being the version in code, once the database is created.)
+            return Ok(None);
+        }
+        Err(e) => Err(e)?,
+    };
+
+    let (minor, patch) = version
+        .split_once('.')
+        .ok_or("invalid database format version file")?;
+
+    Ok(Some(Version::new(
+        DATABASE_FORMAT_VERSION,
+        minor.parse()?,
+        patch.parse()?,
+    )))
+}
+
+/// Writes `changed_version` to the on-disk database after the format is changed.
+/// (Or a new database is created.)
+/// +/// # Correctness +/// +/// This should only be called: +/// - after each format upgrade is complete, +/// - when creating a new database, or +/// - when an older Zebra version opens a newer database. +/// +/// # Concurrency +/// +/// This must only be called while RocksDB has an open database for `config`. +/// Otherwise, multiple Zebra processes could write the version at the same time, +/// corrupting the file. +/// +/// # Panics +/// +/// If the major versions do not match. (The format is incompatible.) +pub fn write_database_format_version_to_disk( + changed_version: &Version, + config: &Config, + network: Network, +) -> Result<(), BoxError> { + let version_path = config.version_file_path(network); + + // The major version is already in the directory path. + assert_eq!( + changed_version.major, DATABASE_FORMAT_VERSION, + "tried to do in-place database format change to an incompatible version" + ); + + let version = format!("{}.{}", changed_version.minor, changed_version.patch); + + // # Concurrency + // + // The caller handles locking for this file write. + fs::write(version_path, version.as_bytes())?; + + Ok(()) +} diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index 85ae1e77df1..b5060d10492 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -1,4 +1,11 @@ -//! Definitions of constants. +//! Constants that impact state behaviour. + +use lazy_static::lazy_static; +use regex::Regex; + +// For doc comment links +#[allow(unused_imports)] +use crate::config::{self, Config}; pub use zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY; @@ -19,13 +26,44 @@ pub use zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY; // TODO: change to HeightDiff pub const MAX_BLOCK_REORG_HEIGHT: u32 = MIN_TRANSPARENT_COINBASE_MATURITY - 1; -/// The database format version, incremented each time the database format changes. -pub const DATABASE_FORMAT_VERSION: u32 = 25; +/// The database format major version, incremented each time the on-disk database format has a +/// breaking data format change. +/// +/// Breaking changes include: +/// - deleting a column family, or +/// - changing a column family's data format in an incompatible way. +/// +/// Breaking changes become minor version changes if: +/// - we previously added compatibility code, and +/// - it's available in all supported Zebra versions. +/// +/// Use [`config::database_format_version_in_code()`] or +/// [`config::database_format_version_on_disk()`] to get the full semantic format version. +pub(crate) const DATABASE_FORMAT_VERSION: u64 = 25; + +/// The database format minor version, incremented each time the on-disk database format has a +/// significant data format change. +/// +/// Significant changes include: +/// - adding new column families, +/// - changing the format of a column family in a compatible way, or +/// - breaking changes with compatibility code in all supported Zebra versions. +pub(crate) const DATABASE_FORMAT_MINOR_VERSION: u64 = 1; + +/// The database format patch version, incremented each time the on-disk database format has a +/// significant format compatibility fix. +pub(crate) const DATABASE_FORMAT_PATCH_VERSION: u64 = 0; + +/// The name of the file containing the minor and patch database versions. +/// +/// Use [`Config::version_file_path()`] to get the path to this file. 
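// A minimal sketch of the version file round trip described above, assuming the
// `semver` crate; `parse_version_file` is a hypothetical helper, not Zebra's API.
use semver::Version;

fn parse_version_file(contents: &str, major: u64) -> Option<Version> {
    // The file only stores "minor.patch"; the major version comes from the
    // `state/vN` directory name, so the caller supplies it.
    let (minor, patch) = contents.split_once('.')?;
    Some(Version::new(major, minor.parse().ok()?, patch.parse().ok()?))
}

fn main() {
    // A v25 database at minor version 1, patch 0 stores "1.0" on disk.
    assert_eq!(parse_version_file("1.0", 25), Some(Version::new(25, 1, 0)));

    // Malformed contents fail to parse, matching the
    // "invalid database format version file" error path above.
    assert_eq!(parse_version_file("not-a-version", 25), None);
}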
+pub(crate) const DATABASE_FORMAT_VERSION_FILE_NAME: &str = "version";
 
 /// The maximum number of blocks to check for NU5 transactions,
 /// before we assume we are on a pre-NU5 legacy chain.
 ///
-/// Zebra usually only has to check back a few blocks, but on testnet it can be a long time between v5 transactions.
+/// Zebra usually only has to check back a few blocks on mainnet, but on testnet it can be a long
+/// time between v5 transactions.
 pub const MAX_LEGACY_CHAIN_BLOCKS: usize = 100_000;
 
 /// The maximum number of non-finalized chain forks Zebra will track.
@@ -58,9 +96,6 @@ const MAX_FIND_BLOCK_HEADERS_RESULTS_FOR_PROTOCOL: u32 = 160;
 pub const MAX_FIND_BLOCK_HEADERS_RESULTS_FOR_ZEBRA: u32 =
     MAX_FIND_BLOCK_HEADERS_RESULTS_FOR_PROTOCOL - 2;
 
-use lazy_static::lazy_static;
-use regex::Regex;
-
 lazy_static! {
     /// Regex that matches the RocksDB error when its lock file is already open.
     pub static ref LOCK_FILE_ERROR: Regex = Regex::new("(lock file).*(temporarily unavailable)|(in use)|(being used by another process)").expect("regex is valid");
diff --git a/zebra-state/src/error.rs b/zebra-state/src/error.rs
index f75f0386810..cf495311efb 100644
--- a/zebra-state/src/error.rs
+++ b/zebra-state/src/error.rs
@@ -24,8 +24,8 @@ pub struct CloneError {
     source: Arc<dyn std::error::Error + Send + Sync + 'static>,
 }
 
-impl From<CommitBlockError> for CloneError {
-    fn from(source: CommitBlockError) -> Self {
+impl From<CommitSemanticallyVerifiedError> for CloneError {
+    fn from(source: CommitSemanticallyVerifiedError) -> Self {
         let source = Arc::new(source);
         Self { source }
     }
@@ -41,17 +41,17 @@ impl From<BoxError> for CloneError {
 /// A boxed [`std::error::Error`].
 pub type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;
 
-/// An error describing the reason a block could not be committed to the state.
+/// An error describing the reason a semantically verified block could not be committed to the state.
 #[derive(Debug, Error, PartialEq, Eq)]
 #[error("block is not contextually valid: {}", .0)]
-pub struct CommitBlockError(#[from] ValidateContextError);
+pub struct CommitSemanticallyVerifiedError(#[from] ValidateContextError);
 
 /// An error describing why a block failed contextual validation.
 #[derive(Debug, Error, Clone, PartialEq, Eq)]
 #[non_exhaustive]
 #[allow(missing_docs)]
 pub enum ValidateContextError {
-    #[error("block parent not found in any chain")]
+    #[error("block parent not found in any chain, or not enough blocks in chain")]
     #[non_exhaustive]
     NotReadyToBeCommitted,
diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs
index 8b7dbd8ecfe..eedb90d1328 100644
--- a/zebra-state/src/lib.rs
+++ b/zebra-state/src/lib.rs
@@ -29,12 +29,18 @@ mod service;
 #[cfg(test)]
 mod tests;
 
-pub use config::{check_and_delete_old_databases, Config};
+pub use config::{
+    check_and_delete_old_databases, database_format_version_in_code,
+    database_format_version_on_disk, Config,
+};
 pub use constants::MAX_BLOCK_REORG_HEIGHT;
 pub use error::{
-    BoxError, CloneError, CommitBlockError, DuplicateNullifierError, ValidateContextError,
+    BoxError, CloneError, CommitSemanticallyVerifiedError, DuplicateNullifierError,
+    ValidateContextError,
+};
+pub use request::{
+    CheckpointVerifiedBlock, HashOrHeight, ReadRequest, Request, SemanticallyVerifiedBlock,
 };
-pub use request::{FinalizedBlock, HashOrHeight, PreparedBlock, ReadRequest, Request};
 pub use response::{KnownBlock, MinedTx, ReadResponse, Response};
 pub use service::{
     chain_tip::{ChainTipChange, LatestChainTip, TipAction},
@@ -54,4 +60,7 @@ pub use service::{
     init_test, init_test_services, ReadStateService,
 };
 
-pub(crate) use request::ContextuallyValidBlock;
+#[cfg(any(test, feature = "proptest-impl"))]
+pub use config::write_database_format_version_to_disk;
+
+pub(crate) use request::ContextuallyVerifiedBlock;
diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs
index 6236eb249a8..cc3df2fd2fc 100644
--- a/zebra-state/src/request.rs
+++ b/zebra-state/src/request.rs
@@ -2,7 +2,7 @@
 use std::{
     collections::{HashMap, HashSet},
-    ops::RangeInclusive,
+    ops::{Deref, DerefMut, RangeInclusive},
     sync::Arc,
 };
@@ -137,7 +137,7 @@ impl std::str::FromStr for HashOrHeight {
 /// the *service caller*'s task, not inside the service call itself. This allows
 /// moving work out of the single-threaded state service.
 #[derive(Clone, Debug, PartialEq, Eq)]
-pub struct PreparedBlock {
+pub struct SemanticallyVerifiedBlock {
     /// The block to commit to the state.
     pub block: Arc<Block>,
     /// The hash of the block.
     pub hash: block::Hash,
@@ -162,15 +162,30 @@ pub struct PreparedBlock {
     pub transaction_hashes: Arc<[transaction::Hash]>,
 }
 
+/// A block ready to be committed directly to the finalized state with
+/// a small number of checks compared with a `ContextuallyVerifiedBlock`.
+///
+/// This is exposed for use in checkpointing.
+///
+/// Note: The difference between a `CheckpointVerifiedBlock` and a `ContextuallyVerifiedBlock` is
+/// that the `CheckpointVerifier` doesn't bind the transaction authorizing data to the
+/// `ChainHistoryBlockTxAuthCommitmentHash`, but the `NonFinalizedState` and `FinalizedState` do.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct CheckpointVerifiedBlock(pub(crate) SemanticallyVerifiedBlock);
 
 // Some fields are pub(crate), so we can add whatever db-format-dependent
 // precomputation we want here without leaking internal details.
 
-/// A contextually validated block, ready to be committed directly to the finalized state with
-/// no checks, if it becomes the root of the best non-finalized chain.
+/// A contextually verified block, ready to be committed directly to the finalized state with no
+/// checks, if it becomes the root of the best non-finalized chain.
 ///
 /// Used by the state service and non-finalized `Chain`.
+///
+/// Note: The difference between a `CheckpointVerifiedBlock` and a `ContextuallyVerifiedBlock` is
+/// that the `CheckpointVerifier` doesn't bind the transaction authorizing data to the
+/// `ChainHistoryBlockTxAuthCommitmentHash`, but the `NonFinalizedState` and `FinalizedState` do.
 #[derive(Clone, Debug, PartialEq, Eq)]
-pub struct ContextuallyValidBlock {
+pub struct ContextuallyVerifiedBlock {
     /// The block to commit to the state.
     pub(crate) block: Arc<Block>,
@@ -207,32 +222,6 @@
     pub(crate) chain_value_pool_change: ValueBalance<NegativeAllowed>,
 }
 
-/// A finalized block, ready to be committed directly to the finalized state with
-/// no checks.
-///
-/// This is exposed for use in checkpointing.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct FinalizedBlock {
-    /// The block to commit to the state.
-    pub block: Arc<Block>,
-    /// The hash of the block.
-    pub hash: block::Hash,
-    /// The height of the block.
-    pub height: block::Height,
-    /// New transparent outputs created in this block, indexed by
-    /// [`OutPoint`](transparent::OutPoint).
-    ///
-    /// Note: although these transparent outputs are newly created, they may not
-    /// be unspent, since a later transaction in a block can spend outputs of an
-    /// earlier transaction.
-    ///
-    /// This field can also contain unrelated outputs, which are ignored.
-    pub(crate) new_outputs: HashMap<transparent::OutPoint, transparent::Utxo>,
-    /// A precomputed list of the hashes of the transactions in this block,
-    /// in the same order as `block.transactions`.
-    pub transaction_hashes: Arc<[transaction::Hash]>,
-}
-
 /// Wraps note commitment trees and the history tree together.
 pub struct Treestate {
     /// Note commitment trees.
@@ -265,43 +254,69 @@ impl Treestate {
 /// Zebra's non-finalized state passes this `struct` over to the finalized state
 /// when committing a block. The associated treestate is passed so that the
 /// finalized state does not have to retrieve the previous treestate from the
-/// database and recompute the new one.
-pub struct FinalizedWithTrees {
+/// database and recompute a new one.
+pub struct SemanticallyVerifiedBlockWithTrees {
     /// A block ready to be committed.
-    pub finalized: FinalizedBlock,
+    pub verified: SemanticallyVerifiedBlock,
     /// The treestate associated with the block.
-    pub treestate: Option<Treestate>,
+    pub treestate: Treestate,
 }
 
-impl FinalizedWithTrees {
-    pub fn new(block: ContextuallyValidBlock, treestate: Treestate) -> Self {
-        let finalized = FinalizedBlock::from(block);
+/// Contains a block ready to be committed.
+///
+/// Zebra's state service passes this `enum` over to the finalized state
+/// when committing a block.
+pub enum FinalizableBlock {
+    Checkpoint {
+        checkpoint_verified: CheckpointVerifiedBlock,
+    },
+    Contextual {
+        contextually_verified: ContextuallyVerifiedBlock,
+        treestate: Treestate,
+    },
+}
 
-        Self {
-            finalized,
-            treestate: Some(treestate),
+impl FinalizableBlock {
+    /// Create a new [`FinalizableBlock`] given a [`ContextuallyVerifiedBlock`].
+    pub fn new(contextually_verified: ContextuallyVerifiedBlock, treestate: Treestate) -> Self {
+        Self::Contextual {
+            contextually_verified,
+            treestate,
         }
     }
-}
 
-impl From<Arc<Block>> for FinalizedWithTrees {
-    fn from(block: Arc<Block>) -> Self {
-        Self::from(FinalizedBlock::from(block))
+    #[cfg(test)]
+    /// Extract a [`Block`] from a [`FinalizableBlock`] variant.
+    pub fn inner_block(&self) -> Arc<Block> {
+        match self {
+            FinalizableBlock::Checkpoint {
+                checkpoint_verified,
+            } => checkpoint_verified.block.clone(),
+            FinalizableBlock::Contextual {
+                contextually_verified,
+                ..
+            } => contextually_verified.block.clone(),
+        }
     }
 }
 
-impl From<FinalizedBlock> for FinalizedWithTrees {
-    fn from(block: FinalizedBlock) -> Self {
-        Self {
-            finalized: block,
-            treestate: None,
+impl From<CheckpointVerifiedBlock> for FinalizableBlock {
+    fn from(checkpoint_verified: CheckpointVerifiedBlock) -> Self {
+        Self::Checkpoint {
+            checkpoint_verified,
         }
     }
 }
 
-impl From<&PreparedBlock> for PreparedBlock {
-    fn from(prepared: &PreparedBlock) -> Self {
-        prepared.clone()
+impl From<Arc<Block>> for FinalizableBlock {
+    fn from(block: Arc<Block>) -> Self {
+        Self::from(CheckpointVerifiedBlock::from(block))
+    }
+}
+
+impl From<&SemanticallyVerifiedBlock> for SemanticallyVerifiedBlock {
+    fn from(semantically_verified: &SemanticallyVerifiedBlock) -> Self {
+        semantically_verified.clone()
     }
 }
 
@@ -309,27 +324,27 @@
 // the *service caller*'s task, not inside the service call itself.
 // This allows moving work out of the single-threaded state service.
 
-impl ContextuallyValidBlock {
+impl ContextuallyVerifiedBlock {
     /// Create a block that's ready for non-finalized `Chain` contextual validation,
-    /// using a [`PreparedBlock`] and the UTXOs it spends.
+    /// using a [`SemanticallyVerifiedBlock`] and the UTXOs it spends.
     ///
-    /// When combined, `prepared.new_outputs` and `spent_utxos` must contain
+    /// When combined, `semantically_verified.new_outputs` and `spent_utxos` must contain
     /// the [`Utxo`](transparent::Utxo)s spent by every transparent input in this block,
     /// including UTXOs created by earlier transactions in this block.
     ///
-    /// Note: a [`ContextuallyValidBlock`] isn't actually contextually valid until
-    /// `Chain::update_chain_state_with` returns success.
+    /// Note: a [`ContextuallyVerifiedBlock`] isn't actually contextually valid until
+    /// [`Chain::push()`](crate::service::non_finalized_state::Chain::push) returns success.
     pub fn with_block_and_spent_utxos(
-        prepared: PreparedBlock,
+        semantically_verified: SemanticallyVerifiedBlock,
         mut spent_outputs: HashMap<transparent::OutPoint, transparent::OrderedUtxo>,
     ) -> Result<Self, ValueBalanceError> {
-        let PreparedBlock {
+        let SemanticallyVerifiedBlock {
             block,
             hash,
             height,
             new_outputs,
             transaction_hashes,
-        } = prepared;
+        } = semantically_verified;
 
         // This is redundant for the non-finalized state,
         // but useful to make some tests pass more easily.
@@ -350,20 +365,15 @@
     }
 }
 
-impl FinalizedBlock {
-    /// Create a block that's ready to be committed to the finalized state,
-    /// using a precalculated [`block::Hash`].
-    ///
-    /// Note: a [`FinalizedBlock`] isn't actually finalized
-    /// until [`Request::CommitFinalizedBlock`] returns success.
-    pub fn with_hash(block: Arc<Block>, hash: block::Hash) -> Self {
+impl SemanticallyVerifiedBlock {
+    fn with_hash(block: Arc<Block>, hash: block::Hash) -> Self {
         let height = block
             .coinbase_height()
             .expect("coinbase height was already checked");
         let transaction_hashes: Arc<[_]> = block.transactions.iter().map(|tx| tx.hash()).collect();
-        let new_outputs = transparent::new_outputs(&block, &transaction_hashes);
+        let new_outputs = transparent::new_ordered_outputs(&block, &transaction_hashes);
 
-        Self {
+        SemanticallyVerifiedBlock {
             block,
             hash,
             height,
@@ -373,17 +383,36 @@
     }
 }
 
-impl From<Arc<Block>> for FinalizedBlock {
+impl CheckpointVerifiedBlock {
+    /// Create a block that's ready to be committed to the finalized state,
+    /// using a precalculated [`block::Hash`].
+    ///
+    /// Note: a [`CheckpointVerifiedBlock`] isn't actually finalized
+    /// until [`Request::CommitCheckpointVerifiedBlock`] returns success.
+    pub fn with_hash(block: Arc<Block>, hash: block::Hash) -> Self {
+        Self(SemanticallyVerifiedBlock::with_hash(block, hash))
+    }
+}
+
+impl From<Arc<Block>> for CheckpointVerifiedBlock {
+    fn from(block: Arc<Block>) -> Self {
+        let hash = block.hash();
+
+        CheckpointVerifiedBlock::with_hash(block, hash)
+    }
+}
+
+impl From<Arc<Block>> for SemanticallyVerifiedBlock {
     fn from(block: Arc<Block>) -> Self {
         let hash = block.hash();
 
-        FinalizedBlock::with_hash(block, hash)
+        SemanticallyVerifiedBlock::with_hash(block, hash)
     }
 }
 
-impl From<ContextuallyValidBlock> for FinalizedBlock {
-    fn from(contextually_valid: ContextuallyValidBlock) -> Self {
-        let ContextuallyValidBlock {
+impl From<ContextuallyVerifiedBlock> for SemanticallyVerifiedBlock {
+    fn from(contextually_valid: ContextuallyVerifiedBlock) -> Self {
+        let ContextuallyVerifiedBlock {
             block,
             hash,
             height,
@@ -397,22 +426,40 @@
             block,
             hash,
             height,
-            new_outputs: utxos_from_ordered_utxos(new_outputs),
+            new_outputs,
             transaction_hashes,
         }
     }
 }
 
+impl From<CheckpointVerifiedBlock> for SemanticallyVerifiedBlock {
+    fn from(checkpoint_verified: CheckpointVerifiedBlock) -> Self {
+        checkpoint_verified.0
+    }
+}
+
+impl Deref for CheckpointVerifiedBlock {
+    type Target = SemanticallyVerifiedBlock;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+impl DerefMut for CheckpointVerifiedBlock {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
 #[derive(Clone, Debug, PartialEq, Eq)]
 /// A query about or modification to the chain state, via the
 /// [`StateService`](crate::service::StateService).
 pub enum Request {
-    /// Performs contextual validation of the given block, committing it to the
-    /// state if successful.
+    /// Performs contextual validation of the given semantically verified block,
+    /// committing it to the state if successful.
     ///
-    /// It is the caller's responsibility to perform semantic validation. This
-    /// request can be made out-of-order; the state service will queue it until
-    /// its parent is ready.
+    /// This request can be made out-of-order; the state service will queue it
+    /// until its parent is ready.
     ///
     /// Returns [`Response::Committed`] with the hash of the block when it is
     /// committed to the state, or an error if the block fails contextual
@@ -428,14 +475,14 @@
     /// Block commit requests should be wrapped in a timeout, so that
     /// out-of-order and invalid requests do not hang indefinitely. See the [`crate`]
     /// documentation for details.
-    CommitBlock(PreparedBlock),
+    CommitSemanticallyVerifiedBlock(SemanticallyVerifiedBlock),
 
-    /// Commit a checkpointed block to the state, skipping most block validation.
+    /// Commit a checkpointed block to the state, skipping most but not all
+    /// contextual validation.
     ///
-    /// This is exposed for use in checkpointing, which produces finalized
-    /// blocks. It is the caller's responsibility to ensure that the block is
-    /// semantically valid and final. This request can be made out-of-order;
-    /// the state service will queue it until its parent is ready.
+    /// This is exposed for use in checkpointing, which produces checkpoint verified
+    /// blocks. This request can be made out-of-order; the state service will queue
+    /// it until its parent is ready.
     ///
     /// Returns [`Response::Committed`] with the hash of the newly committed
     /// block, or an error.
@@ -447,8 +494,9 @@
     ///
     /// # Note
     ///
-    /// Finalized and non-finalized blocks are an internal Zebra implementation detail.
-    /// There is no difference between these blocks on the network, or in Zebra's
+    /// [`SemanticallyVerifiedBlock`], [`ContextuallyVerifiedBlock`] and
+    /// [`CheckpointVerifiedBlock`] are an internal Zebra implementation detail.
+    /// There is no difference between these blocks on the Zcash network, or in Zebra's
     /// network or syncer implementations.
     ///
     /// # Consensus
@@ -474,7 +522,7 @@
     /// Block commit requests should be wrapped in a timeout, so that
     /// out-of-order and invalid requests do not hang indefinitely. See the [`crate`]
     /// documentation for details.
-    CommitFinalizedBlock(FinalizedBlock),
+    CommitCheckpointVerifiedBlock(CheckpointVerifiedBlock),
 
     /// Computes the depth in the current best chain of the block identified by the given hash.
     ///
@@ -619,14 +667,15 @@
     ///
     /// Returns [`Response::ValidBlockProposal`] when successful.
     /// See `[ReadRequest::CheckBlockProposalValidity]` for details.
-    CheckBlockProposalValidity(PreparedBlock),
+    CheckBlockProposalValidity(SemanticallyVerifiedBlock),
 }
 
 impl Request {
     fn variant_name(&self) -> &'static str {
         match self {
-            Request::CommitBlock(_) => "commit_block",
-            Request::CommitFinalizedBlock(_) => "commit_finalized_block",
+            Request::CommitSemanticallyVerifiedBlock(_) => "commit_semantically_verified_block",
+            Request::CommitCheckpointVerifiedBlock(_) => "commit_checkpoint_verified_block",
+
             Request::AwaitUtxo(_) => "await_utxo",
             Request::Depth(_) => "depth",
             Request::Tip => "tip",
@@ -870,7 +919,7 @@ pub enum ReadRequest {
     ///
     /// Returns [`ReadResponse::ValidBlockProposal`] when successful, or an error if
     /// the block fails contextual validation.
-    CheckBlockProposalValidity(PreparedBlock),
+    CheckBlockProposalValidity(SemanticallyVerifiedBlock),
 }
 
 impl ReadRequest {
@@ -947,9 +996,8 @@ impl TryFrom<Request> for ReadRequest {
             Ok(ReadRequest::CheckBestChainTipNullifiersAndAnchors(tx))
         }
 
-            Request::CommitBlock(_) | Request::CommitFinalizedBlock(_) => {
-                Err("ReadService does not write blocks")
-            }
+            Request::CommitSemanticallyVerifiedBlock(_)
+            | Request::CommitCheckpointVerifiedBlock(_) => Err("ReadService does not write blocks"),
 
             Request::AwaitUtxo(_) => Err("ReadService does not track pending UTXOs. \
                                           Manually convert the request to ReadRequest::AnyChainUtxo, \
@@ -958,9 +1006,9 @@
             Request::KnownBlock(_) => Err("ReadService does not track queued blocks"),
 
             #[cfg(feature = "getblocktemplate-rpcs")]
-            Request::CheckBlockProposalValidity(prepared) => {
-                Ok(ReadRequest::CheckBlockProposalValidity(prepared))
-            }
+            Request::CheckBlockProposalValidity(semantically_verified) => Ok(
+                ReadRequest::CheckBlockProposalValidity(semantically_verified),
+            ),
         }
     }
 }
diff --git a/zebra-rpc/src/queue/tests/prop.rs b/zebra-rpc/src/queue/tests/prop.rs
index c250af68e90..1db9a340f2e 100644
--- a/zebra-rpc/src/queue/tests/prop.rs
+++ b/zebra-rpc/src/queue/tests/prop.rs
@@ -277,7 +277,7 @@ proptest! {
         block.transactions.push(Arc::new(transaction.clone()));
 
         // commit the created block
-        let request = zebra_state::Request::CommitFinalizedBlock(zebra_state::FinalizedBlock::from(Arc::new(block.clone())));
+        let request = zebra_state::Request::CommitCheckpointVerifiedBlock(zebra_state::CheckpointVerifiedBlock::from(Arc::new(block.clone())));
         let send_task = tokio::spawn(write_state.clone().oneshot(request.clone()));
 
         let response = zebra_state::Response::Committed(block.hash());
diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs
index 7f1ea935e29..ba7f8d29ba8 100644
--- a/zebra-state/src/response.rs
+++ b/zebra-state/src/response.rs
@@ -24,7 +24,7 @@ use crate::{service::read::AddressUtxos, TransactionLocation};
 #[derive(Clone, Debug, PartialEq, Eq)]
 /// A response to a [`StateService`](crate::service::StateService) [`Request`].
 pub enum Response {
-    /// Response to [`Request::CommitBlock`] indicating that a block was
+    /// Response to [`Request::CommitSemanticallyVerifiedBlock`] indicating that a block was
     /// successfully committed to the state.
     Committed(block::Hash),
 
    /// Response to [`Request::Depth`] with the current depth of that block in the best chain.
    Depth(Option<u32>),
 
    /// Response to [`Request::Tip`] with the current best chain tip.
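// A minimal sketch of how a caller drives the renamed commit requests, using the
// `zebra_state` types above; `commit_request_for` is a hypothetical helper, not Zebra's API.
use std::sync::Arc;

use zebra_chain::block::Block;
use zebra_state::{CheckpointVerifiedBlock, Request};

fn commit_request_for(block: Arc<Block>) -> Request {
    // `From<Arc<Block>>` recomputes the hash; callers that already know it
    // can use `CheckpointVerifiedBlock::with_hash` instead.
    let checkpoint_verified = CheckpointVerifiedBlock::from(block);

    // `CheckpointVerifiedBlock` derefs to `SemanticallyVerifiedBlock`, so the
    // precomputed `height` and `hash` fields remain directly accessible.
    let _height = checkpoint_verified.height;

    Request::CommitCheckpointVerifiedBlock(checkpoint_verified)
}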
+    //
+    // TODO: remove this request, and replace it with a call to
+    //       `LatestChainTip::best_tip_height_and_hash()`
     Tip(Option<(block::Height, block::Hash)>),
 
     /// Response to [`Request::BlockLocator`] with a block locator object.
diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs
index 48a841d0cdf..5340ef14bc0 100644
--- a/zebra-state/src/service.rs
+++ b/zebra-state/src/service.rs
@@ -43,7 +43,7 @@ use tower::buffer::Buffer;
 use zebra_chain::{
     block::{self, CountedHeader, HeightDiff},
-    diagnostic::CodeTimer,
+    diagnostic::{task::WaitForPanics, CodeTimer},
     parameters::{Network, NetworkUpgrade},
 };
@@ -61,8 +61,8 @@ use crate::{
         queued_blocks::QueuedBlocks,
         watch_receiver::WatchReceiver,
     },
-    BoxError, CloneError, Config, FinalizedBlock, PreparedBlock, ReadRequest, ReadResponse,
-    Request, Response,
+    BoxError, CheckpointVerifiedBlock, CloneError, Config, ReadRequest, ReadResponse, Request,
+    Response, SemanticallyVerifiedBlock,
 };
 
 pub mod block_iter;
@@ -86,7 +86,7 @@ mod tests;
 
 pub use finalized_state::{OutputIndex, OutputLocation, TransactionLocation};
 
-use self::queued_blocks::{QueuedFinalized, QueuedNonFinalized, SentHashes};
+use self::queued_blocks::{QueuedCheckpointVerified, QueuedSemanticallyVerified, SentHashes};
 
 /// A read-write service for Zebra's cached blockchain state.
 ///
@@ -124,25 +124,26 @@ pub(crate) struct StateService {
     //
     /// Queued blocks for the [`NonFinalizedState`] that arrived out of order.
     /// These blocks are awaiting their parent blocks before they can do contextual verification.
-    queued_non_finalized_blocks: QueuedBlocks,
+    non_finalized_state_queued_blocks: QueuedBlocks,
 
     /// Queued blocks for the [`FinalizedState`] that arrived out of order.
     /// These blocks are awaiting their parent blocks before they can do contextual verification.
     ///
     /// Indexed by their parent block hash.
-    queued_finalized_blocks: HashMap<block::Hash, QueuedFinalized>,
+    finalized_state_queued_blocks: HashMap<block::Hash, QueuedCheckpointVerified>,
 
     /// A channel to send blocks to the `block_write_task`,
     /// so they can be written to the [`NonFinalizedState`].
     non_finalized_block_write_sender:
-        Option<tokio::sync::mpsc::UnboundedSender<QueuedNonFinalized>>,
+        Option<tokio::sync::mpsc::UnboundedSender<QueuedSemanticallyVerified>>,
 
     /// A channel to send blocks to the `block_write_task`,
     /// so they can be written to the [`FinalizedState`].
     ///
     /// This sender is dropped after the state has finished sending all the checkpointed blocks,
-    /// and the lowest non-finalized block arrives.
-    finalized_block_write_sender: Option<tokio::sync::mpsc::UnboundedSender<QueuedFinalized>>,
+    /// and the lowest semantically verified block arrives.
+    finalized_block_write_sender:
+        Option<tokio::sync::mpsc::UnboundedSender<QueuedCheckpointVerified>>,
 
     /// The [`block::Hash`] of the most recent block sent on
     /// `finalized_block_write_sender` or `non_finalized_block_write_sender`.
@@ -151,25 +152,20 @@ pub(crate) struct StateService {
     /// - the finalized tip, if there are stored blocks, or
     /// - the genesis block's parent hash, if the database is empty.
     ///
-    /// If `invalid_block_reset_receiver` gets a reset, this is:
+    /// If `invalid_block_write_reset_receiver` gets a reset, this is:
     /// - the hash of the last valid committed block (the parent of the invalid block).
-    //
-    // TODO:
-    //   - turn this into an IndexMap containing recent non-finalized block hashes and heights
-    //     (they are all potential tips)
-    //   - remove block hashes once their heights are strictly less than the finalized tip
-    last_sent_finalized_block_hash: block::Hash,
+    finalized_block_write_last_sent_hash: block::Hash,
 
     /// A set of block hashes that have been sent to the block write task.
     /// Hashes of blocks below the finalized tip height are periodically pruned.
-    sent_non_finalized_block_hashes: SentHashes,
+    non_finalized_block_write_sent_hashes: SentHashes,
 
     /// If an invalid block is sent on `finalized_block_write_sender`
     /// or `non_finalized_block_write_sender`,
     /// this channel gets the [`block::Hash`] of the valid tip.
     //
     // TODO: add tests for finalized and non-finalized resets (#2654)
-    invalid_block_reset_receiver: tokio::sync::mpsc::UnboundedReceiver<block::Hash>,
+    invalid_block_write_reset_receiver: tokio::sync::mpsc::UnboundedReceiver<block::Hash>,
 
     // Pending UTXO Request Tracking
     //
@@ -188,11 +184,11 @@ pub(crate) struct StateService {
     // Metrics
     //
-    /// A metric tracking the maximum height that's currently in `queued_finalized_blocks`
+    /// A metric tracking the maximum height that's currently in `finalized_state_queued_blocks`
     ///
-    /// Set to `f64::NAN` if `queued_finalized_blocks` is empty, because grafana shows NaNs
+    /// Set to `f64::NAN` if `finalized_state_queued_blocks` is empty, because grafana shows NaNs
     /// as a break in the graph.
-    max_queued_finalized_height: f64,
+    max_finalized_queue_height: f64,
 }
 
 /// A read-only service for accessing Zebra's cached blockchain state.
@@ -245,16 +241,16 @@ impl Drop for StateService {
         // Close the channels (non-blocking)
         // This makes the block write thread exit the next time it checks the channels.
         // We want to do this here so we get any errors or panics from the block write task before it shuts down.
-        self.invalid_block_reset_receiver.close();
+        self.invalid_block_write_reset_receiver.close();
 
         std::mem::drop(self.finalized_block_write_sender.take());
         std::mem::drop(self.non_finalized_block_write_sender.take());
 
         self.clear_finalized_block_queue(
-            "dropping the state: dropped unused queued finalized block",
+            "dropping the state: dropped unused finalized state queue block",
         );
         self.clear_non_finalized_block_queue(
-            "dropping the state: dropped unused queued non-finalized block",
+            "dropping the state: dropped unused non-finalized state queue block",
         );
 
         // Then drop self.read_service, which checks the block write task for panics,
@@ -267,8 +263,9 @@ impl Drop for ReadStateService {
         // The read state service shares the state,
         // so dropping it should check if we can shut down.
 
+        // TODO: move this into a try_shutdown() method
         if let Some(block_write_task) = self.block_write_task.take() {
-            if let Ok(block_write_task_handle) = Arc::try_unwrap(block_write_task) {
+            if let Some(block_write_task_handle) = Arc::into_inner(block_write_task) {
                 // We're the last database user, so we can tell it to shut down (blocking):
                 // - flushes the database to disk, and
                 // - drops the database, which cleans up any database tasks correctly.
@@ -284,6 +281,7 @@
                 #[cfg(test)]
                 debug!("waiting for the block write task to finish");
 
+                // TODO: move this into a check_for_panics() method
                 if let Err(thread_panic) = block_write_task_handle.join() {
                     std::panic::resume_unwind(thread_panic);
                 } else {
@@ -345,11 +343,9 @@ impl StateService {
         let initial_tip = finalized_state
             .db
             .tip_block()
-            .map(FinalizedBlock::from)
+            .map(CheckpointVerifiedBlock::from)
             .map(ChainTipBlock::from);
 
-        timer.finish(module_path!(), line!(), "fetching database tip");
-
-        let timer = CodeTimer::start();
         let (chain_tip_sender, latest_chain_tip, chain_tip_change) =
             ChainTipSender::new(initial_tip, network);
 
@@ -364,7 +360,7 @@ impl StateService {
             tokio::sync::mpsc::unbounded_channel();
         let (finalized_block_write_sender, finalized_block_write_receiver) =
             tokio::sync::mpsc::unbounded_channel();
-        let (invalid_block_reset_sender, invalid_block_reset_receiver) =
+        let (invalid_block_reset_sender, invalid_block_write_reset_receiver) =
             tokio::sync::mpsc::unbounded_channel();
 
         let finalized_state_for_writing = finalized_state.clone();
@@ -396,25 +392,25 @@ impl StateService {
         let full_verifier_utxo_lookahead =
             full_verifier_utxo_lookahead.expect("unexpected negative height");
-        let queued_non_finalized_blocks = QueuedBlocks::default();
+        let non_finalized_state_queued_blocks = QueuedBlocks::default();
         let pending_utxos = PendingUtxos::default();
-        let last_sent_finalized_block_hash = finalized_state.db.finalized_tip_hash();
+        let finalized_block_write_last_sent_hash = finalized_state.db.finalized_tip_hash();
 
         let state = Self {
             network,
             full_verifier_utxo_lookahead,
-            queued_non_finalized_blocks,
-            queued_finalized_blocks: HashMap::new(),
+            non_finalized_state_queued_blocks,
+            finalized_state_queued_blocks: HashMap::new(),
             non_finalized_block_write_sender: Some(non_finalized_block_write_sender),
             finalized_block_write_sender: Some(finalized_block_write_sender),
-            last_sent_finalized_block_hash,
-            sent_non_finalized_block_hashes: SentHashes::default(),
-            invalid_block_reset_receiver,
+            finalized_block_write_last_sent_hash,
+            non_finalized_block_write_sent_hashes: SentHashes::default(),
+            invalid_block_write_reset_receiver,
             pending_utxos,
             last_prune: Instant::now(),
             read_service: read_service.clone(),
-            max_queued_finalized_height: f64::NAN,
+            max_finalized_queue_height: f64::NAN,
         };
 
         timer.finish(module_path!(), line!(), "initializing state service");
@@ -454,95 +450,95 @@ impl StateService {
         (state, read_service, latest_chain_tip, chain_tip_change)
     }
 
-    /// Queue a finalized block for verification and storage in the finalized state.
+    /// Queue a checkpoint verified block for verification and storage in the finalized state.
     ///
     /// Returns a channel receiver that provides the result of the block commit.
-    fn queue_and_commit_finalized(
+    fn queue_and_commit_to_finalized_state(
         &mut self,
-        finalized: FinalizedBlock,
+        checkpoint_verified: CheckpointVerifiedBlock,
     ) -> oneshot::Receiver<Result<block::Hash, BoxError>> {
         // # Correctness & Performance
         //
         // This method must not block, access the database, or perform CPU-intensive tasks,
         // because it is called directly from the tokio executor's Future threads.
- let queued_prev_hash = finalized.block.header.previous_block_hash; - let queued_height = finalized.height; + let queued_prev_hash = checkpoint_verified.block.header.previous_block_hash; + let queued_height = checkpoint_verified.height; // If we're close to the final checkpoint, make the block's UTXOs available for - // full verification of non-finalized blocks, even when it is in the channel. + // semantic block verification, even when it is in the channel. if self.is_close_to_final_checkpoint(queued_height) { - self.sent_non_finalized_block_hashes - .add_finalized(&finalized) + self.non_finalized_block_write_sent_hashes + .add_finalized(&checkpoint_verified) } let (rsp_tx, rsp_rx) = oneshot::channel(); - let queued = (finalized, rsp_tx); + let queued = (checkpoint_verified, rsp_tx); if self.finalized_block_write_sender.is_some() { - // We're still committing finalized blocks + // We're still committing checkpoint verified blocks if let Some(duplicate_queued) = self - .queued_finalized_blocks + .finalized_state_queued_blocks .insert(queued_prev_hash, queued) { - Self::send_finalized_block_error( + Self::send_checkpoint_verified_block_error( duplicate_queued, - "dropping older finalized block: got newer duplicate block", + "dropping older checkpoint verified block: got newer duplicate block", ); } - self.drain_queue_and_commit_finalized(); + self.drain_finalized_queue_and_commit(); } else { - // We've finished committing finalized blocks, so drop any repeated queued blocks, - // and return an error. + // We've finished committing checkpoint verified blocks to the finalized state, + // so drop any repeated queued blocks, and return an error. // // TODO: track the latest sent height, and drop any blocks under that height - // every time we send some blocks (like QueuedNonFinalizedBlocks) - Self::send_finalized_block_error( + // every time we send some blocks (like QueuedSemanticallyVerifiedBlocks) + Self::send_checkpoint_verified_block_error( queued, - "already finished committing finalized blocks: dropped duplicate block, \ + "already finished committing checkpoint verified blocks: dropped duplicate block, \ block is already committed to the state", ); self.clear_finalized_block_queue( - "already finished committing finalized blocks: dropped duplicate block, \ + "already finished committing checkpoint verified blocks: dropped duplicate block, \ block is already committed to the state", ); } - if self.queued_finalized_blocks.is_empty() { - self.max_queued_finalized_height = f64::NAN; - } else if self.max_queued_finalized_height.is_nan() - || self.max_queued_finalized_height < queued_height.0 as f64 + if self.finalized_state_queued_blocks.is_empty() { + self.max_finalized_queue_height = f64::NAN; + } else if self.max_finalized_queue_height.is_nan() + || self.max_finalized_queue_height < queued_height.0 as f64 { // if there are still blocks in the queue, then either: // - the new block was lower than the old maximum, and there was a gap before it, // so the maximum is still the same (and we skip this code), or // - the new block is higher than the old maximum, and there is at least one gap // between the finalized tip and the new maximum - self.max_queued_finalized_height = queued_height.0 as f64; + self.max_finalized_queue_height = queued_height.0 as f64; } metrics::gauge!( "state.checkpoint.queued.max.height", - self.max_queued_finalized_height, + self.max_finalized_queue_height, ); metrics::gauge!( "state.checkpoint.queued.block.count", - self.queued_finalized_blocks.len() as f64, + 
self.finalized_state_queued_blocks.len() as f64, ); rsp_rx } - /// Finds queued finalized blocks to be committed to the state in order, + /// Finds finalized state queue blocks to be committed to the state in order, /// removes them from the queue, and sends them to the block commit task. /// /// After queueing a finalized block, this method checks whether the newly /// queued block (and any of its descendants) can be committed to the state. /// /// Returns an error if the block commit channel has been closed. - pub fn drain_queue_and_commit_finalized(&mut self) { + pub fn drain_finalized_queue_and_commit(&mut self) { use tokio::sync::mpsc::error::{SendError, TryRecvError}; // # Correctness & Performance @@ -551,8 +547,8 @@ impl StateService { // because it is called directly from the tokio executor's Future threads. // If a block failed, we need to start again from a valid tip. - match self.invalid_block_reset_receiver.try_recv() { - Ok(reset_tip_hash) => self.last_sent_finalized_block_hash = reset_tip_hash, + match self.invalid_block_write_reset_receiver.try_recv() { + Ok(reset_tip_hash) => self.finalized_block_write_last_sent_hash = reset_tip_hash, Err(TryRecvError::Disconnected) => { info!("Block commit task closed the block reset channel. Is Zebra shutting down?"); return; @@ -562,12 +558,12 @@ impl StateService { } while let Some(queued_block) = self - .queued_finalized_blocks - .remove(&self.last_sent_finalized_block_hash) + .finalized_state_queued_blocks + .remove(&self.finalized_block_write_last_sent_hash) { let last_sent_finalized_block_height = queued_block.0.height; - self.last_sent_finalized_block_hash = queued_block.0.hash; + self.finalized_block_write_last_sent_hash = queued_block.0.hash; // If we've finished sending finalized blocks, ignore any repeated blocks. // (Blocks can be repeated after a syncer reset.) @@ -577,7 +573,7 @@ impl StateService { // If the receiver is closed, we can't send any more blocks. if let Err(SendError(queued)) = send_result { // If Zebra is shutting down, drop blocks and return an error. - Self::send_finalized_block_error( + Self::send_checkpoint_verified_block_error( queued, "block commit task exited. Is Zebra shutting down?", ); @@ -595,15 +591,18 @@ impl StateService { } } - /// Drops all queued finalized blocks, and sends an error on their result channels. + /// Drops all finalized state queue blocks, and sends an error on their result channels. fn clear_finalized_block_queue(&mut self, error: impl Into + Clone) { - for (_hash, queued) in self.queued_finalized_blocks.drain() { - Self::send_finalized_block_error(queued, error.clone()); + for (_hash, queued) in self.finalized_state_queued_blocks.drain() { + Self::send_checkpoint_verified_block_error(queued, error.clone()); } } - /// Send an error on a `QueuedFinalized` block's result channel, and drop the block - fn send_finalized_block_error(queued: QueuedFinalized, error: impl Into) { + /// Send an error on a `QueuedCheckpointVerified` block's result channel, and drop the block + fn send_checkpoint_verified_block_error( + queued: QueuedCheckpointVerified, + error: impl Into, + ) { let (finalized, rsp_tx) = queued; // The block sender might have already given up on this block, @@ -612,15 +611,18 @@ impl StateService { std::mem::drop(finalized); } - /// Drops all queued non-finalized blocks, and sends an error on their result channels. + /// Drops all non-finalized state queue blocks, and sends an error on their result channels. 
     fn clear_non_finalized_block_queue(&mut self, error: impl Into<BoxError> + Clone) {
-        for (_hash, queued) in self.queued_non_finalized_blocks.drain() {
-            Self::send_non_finalized_block_error(queued, error.clone());
+        for (_hash, queued) in self.non_finalized_state_queued_blocks.drain() {
+            Self::send_semantically_verified_block_error(queued, error.clone());
         }
     }
 
-    /// Send an error on a `QueuedNonFinalized` block's result channel, and drop the block
-    fn send_non_finalized_block_error(queued: QueuedNonFinalized, error: impl Into<BoxError>) {
+    /// Send an error on a `QueuedSemanticallyVerified` block's result channel, and drop the block
+    fn send_semantically_verified_block_error(
+        queued: QueuedSemanticallyVerified,
+        error: impl Into<BoxError>,
+    ) {
         let (finalized, rsp_tx) = queued;
 
         // The block sender might have already given up on this block,
@@ -629,24 +631,24 @@ impl StateService {
         std::mem::drop(finalized);
     }
 
-    /// Queue a non finalized block for verification and check if any queued
+    /// Queue a semantically verified block for contextual verification and check if any queued
     /// blocks are ready to be verified and committed to the state.
     ///
     /// This function encodes the logic for [committing non-finalized blocks][1]
     /// in RFC0005.
    ///
     /// [1]: https://zebra.zfnd.org/dev/rfcs/0005-state-updates.html#committing-non-finalized-blocks
-    #[instrument(level = "debug", skip(self, prepared))]
-    fn queue_and_commit_non_finalized(
+    #[instrument(level = "debug", skip(self, semantically_verified))]
+    fn queue_and_commit_to_non_finalized_state(
         &mut self,
-        prepared: PreparedBlock,
+        semantically_verified: SemanticallyVerifiedBlock,
     ) -> oneshot::Receiver<Result<block::Hash, BoxError>> {
-        tracing::debug!(block = %prepared.block, "queueing block for contextual verification");
-        let parent_hash = prepared.block.header.previous_block_hash;
+        tracing::debug!(block = %semantically_verified.block, "queueing block for contextual verification");
+        let parent_hash = semantically_verified.block.header.previous_block_hash;
 
         if self
-            .sent_non_finalized_block_hashes
-            .contains(&prepared.hash)
+            .non_finalized_block_write_sent_hashes
+            .contains(&semantically_verified.hash)
         {
             let (rsp_tx, rsp_rx) = oneshot::channel();
             let _ = rsp_tx.send(Err(
@@ -655,7 +657,11 @@ impl StateService {
             return rsp_rx;
         }
 
-        if self.read_service.db.contains_height(prepared.height) {
+        if self
+            .read_service
+            .db
+            .contains_height(semantically_verified.height)
+        {
             let (rsp_tx, rsp_rx) = oneshot::channel();
             let _ = rsp_tx.send(Err(
                 "block height is in the finalized state: block is already committed to the state"
@@ -664,11 +670,12 @@ impl StateService {
             return rsp_rx;
         }
 
-        // Request::CommitBlock contract: a request to commit a block which has
-        // been queued but not yet committed to the state fails the older
-        // request and replaces it with the newer request.
+        // [`Request::CommitSemanticallyVerifiedBlock`] contract: a request to commit a block which
+        // has been queued but not yet committed to the state fails the older request and replaces
+        // it with the newer request.
+        let rsp_rx = if let Some((_, old_rsp_tx)) = self
+            .non_finalized_state_queued_blocks
+            .get_mut(&semantically_verified.hash)
         {
             tracing::debug!("replacing older queued request with new request");
             let (mut rsp_tx, rsp_rx) = oneshot::channel();
@@ -677,12 +684,13 @@ impl StateService {
             rsp_rx
         } else {
             let (rsp_tx, rsp_rx) = oneshot::channel();
-            self.queued_non_finalized_blocks.queue((prepared, rsp_tx));
+            self.non_finalized_state_queued_blocks
+                .queue((semantically_verified, rsp_tx));
             rsp_rx
         };
 
-        // We've finished sending finalized blocks when:
-        // - we've sent the finalized block for the last checkpoint, and
+        // We've finished sending checkpoint verified blocks when:
+        // - we've sent the verified block for the last checkpoint, and
         // - it has been successfully written to disk.
         //
         // We detect the last checkpoint by looking for non-finalized blocks
@@ -691,17 +699,18 @@ impl StateService {
         // TODO: configure the state with the last checkpoint hash instead?
         if self.finalized_block_write_sender.is_some()
             && self
-                .queued_non_finalized_blocks
-                .has_queued_children(self.last_sent_finalized_block_hash)
-            && self.read_service.db.finalized_tip_hash() == self.last_sent_finalized_block_hash
+                .non_finalized_state_queued_blocks
+                .has_queued_children(self.finalized_block_write_last_sent_hash)
+            && self.read_service.db.finalized_tip_hash()
+                == self.finalized_block_write_last_sent_hash
         {
-            // Tell the block write task to stop committing finalized blocks,
-            // and move on to committing non-finalized blocks.
+            // Tell the block write task to stop committing checkpoint verified blocks to the finalized state,
+            // and move on to committing semantically verified blocks to the non-finalized state.
            std::mem::drop(self.finalized_block_write_sender.take());
 
-            // We've finished committing finalized blocks, so drop any repeated queued blocks.
+            // We've finished committing checkpoint verified blocks to the finalized state, so drop any repeated queued blocks.
             self.clear_finalized_block_queue(
-                "already finished committing finalized blocks: dropped duplicate block, \
+                "already finished committing checkpoint verified blocks: dropped duplicate block, \
                 block is already committed to the state",
             );
         }
@@ -722,10 +731,10 @@ impl StateService {
             "Finalized state must have at least one block before committing non-finalized state",
         );
 
-        self.queued_non_finalized_blocks
+        self.non_finalized_state_queued_blocks
             .prune_by_height(finalized_tip_height);
 
-        self.sent_non_finalized_block_hashes
+        self.non_finalized_block_write_sent_hashes
             .prune_by_height(finalized_tip_height);
     }
 
@@ -734,13 +743,13 @@ impl StateService {
 
     /// Returns `true` if `hash` is a valid previous block hash for new non-finalized blocks.
     fn can_fork_chain_at(&self, hash: &block::Hash) -> bool {
-        self.sent_non_finalized_block_hashes.contains(hash)
+        self.non_finalized_block_write_sent_hashes.contains(hash)
             || &self.read_service.db.finalized_tip_hash() == hash
     }
 
     /// Returns `true` if `queued_height` is near the final checkpoint.
    ///
-    /// The non-finalized block verifier needs access to UTXOs from finalized blocks
+    /// The semantic block verifier needs access to UTXOs from checkpoint verified blocks
     /// near the final checkpoint, so that it can verify blocks that spend those UTXOs.
/// /// If it doesn't have the required UTXOs, some blocks will time out, @@ -759,18 +768,19 @@ impl StateService { while let Some(parent_hash) = new_parents.pop() { let queued_children = self - .queued_non_finalized_blocks + .non_finalized_state_queued_blocks .dequeue_children(parent_hash); for queued_child in queued_children { - let (PreparedBlock { hash, .. }, _) = queued_child; + let (SemanticallyVerifiedBlock { hash, .. }, _) = queued_child; - self.sent_non_finalized_block_hashes.add(&queued_child.0); + self.non_finalized_block_write_sent_hashes + .add(&queued_child.0); let send_result = non_finalized_block_write_sender.send(queued_child); if let Err(SendError(queued)) = send_result { // If Zebra is shutting down, drop blocks and return an error. - Self::send_non_finalized_block_error( + Self::send_semantically_verified_block_error( queued, "block commit task exited. Is Zebra shutting down?", ); @@ -786,7 +796,7 @@ impl StateService { } } - self.sent_non_finalized_block_hashes.finish_batch(); + self.non_finalized_block_write_sent_hashes.finish_batch(); }; } @@ -798,12 +808,12 @@ impl StateService { ) } - /// Assert some assumptions about the prepared `block` before it is queued. - fn assert_block_can_be_validated(&self, block: &PreparedBlock) { - // required by CommitBlock call + /// Assert some assumptions about the semantically verified `block` before it is queued. + fn assert_block_can_be_validated(&self, block: &SemanticallyVerifiedBlock) { + // required by `Request::CommitSemanticallyVerifiedBlock` call assert!( block.height > self.network.mandatory_checkpoint_height(), - "invalid non-finalized block height: the canopy checkpoint is mandatory, pre-canopy \ + "invalid semantically verified block height: the canopy checkpoint is mandatory, pre-canopy \ blocks, and the canopy activation block, must be committed to the state as finalized \ blocks" ); @@ -899,13 +909,13 @@ impl Service for StateService { let span = Span::current(); match req { - // Uses queued_non_finalized_blocks and pending_utxos in the StateService + // Uses non_finalized_state_queued_blocks and pending_utxos in the StateService // Accesses shared writeable state in the StateService, NonFinalizedState, and ZebraDb. 
- Request::CommitBlock(prepared) => { - self.assert_block_can_be_validated(&prepared); + Request::CommitSemanticallyVerifiedBlock(semantically_verified) => { + self.assert_block_can_be_validated(&semantically_verified); self.pending_utxos - .check_against_ordered(&prepared.new_outputs); + .check_against_ordered(&semantically_verified.new_outputs); // # Performance // @@ -919,7 +929,9 @@ impl Service for StateService { // https://docs.rs/tokio/latest/tokio/task/fn.block_in_place.html let rsp_rx = tokio::task::block_in_place(move || { - span.in_scope(|| self.queue_and_commit_non_finalized(prepared)) + span.in_scope(|| { + self.queue_and_commit_to_non_finalized_state(semantically_verified) + }) }); // TODO: @@ -927,14 +939,16 @@ impl Service for StateService { // as well as in poll_ready() // The work is all done, the future just waits on a channel for the result - timer.finish(module_path!(), line!(), "CommitBlock"); + timer.finish(module_path!(), line!(), "CommitSemanticallyVerifiedBlock"); let span = Span::current(); async move { rsp_rx .await .map_err(|_recv_error| { - BoxError::from("block was dropped from the state CommitBlock queue") + BoxError::from( + "block was dropped from the queue of non-finalized blocks", + ) }) // TODO: replace with Result::flatten once it stabilises // https://github.com/rust-lang/rust/issues/70142 @@ -946,39 +960,43 @@ impl Service for StateService { .boxed() } - // Uses queued_finalized_blocks and pending_utxos in the StateService. + // Uses finalized_state_queued_blocks and pending_utxos in the StateService. // Accesses shared writeable state in the StateService. - Request::CommitFinalizedBlock(finalized) => { + Request::CommitCheckpointVerifiedBlock(finalized) => { // # Consensus // - // A non-finalized block verification could have called AwaitUtxo - // before this finalized block arrived in the state. - // So we need to check for pending UTXOs here for non-finalized blocks, - // even though it is redundant for most finalized blocks. - // (Finalized blocks are verified using block hash checkpoints + // A semantic block verification could have called AwaitUtxo + // before this checkpoint verified block arrived in the state. + // So we need to check for pending UTXO requests sent by running + // semantic block verifications. + // + // This check is redundant for most checkpoint verified blocks, + // because semantic verification can only succeed near the final + // checkpoint, when all the UTXOs are available for the verifying block. + // + // (Checkpoint block UTXOs are verified using block hash checkpoints // and transaction merkle tree block header commitments.) - self.pending_utxos.check_against(&finalized.new_outputs); + self.pending_utxos + .check_against_ordered(&finalized.new_outputs); // # Performance // // This method doesn't block, access the database, or perform CPU-intensive tasks, // so we can run it directly in the tokio executor's Future threads. 
- let rsp_rx = self.queue_and_commit_finalized(finalized); + let rsp_rx = self.queue_and_commit_to_finalized_state(finalized); // TODO: // - check for panics in the block write task here, // as well as in poll_ready() // The work is all done, the future just waits on a channel for the result - timer.finish(module_path!(), line!(), "CommitFinalizedBlock"); + timer.finish(module_path!(), line!(), "CommitCheckpointVerifiedBlock"); async move { rsp_rx .await .map_err(|_recv_error| { - BoxError::from( - "block was dropped from the state CommitFinalizedBlock queue", - ) + BoxError::from("block was dropped from the queue of finalized blocks") }) // TODO: replace with Result::flatten once it stabilises // https://github.com/rust-lang/rust/issues/70142 @@ -990,7 +1008,7 @@ impl Service for StateService { .boxed() } - // Uses pending_utxos and queued_non_finalized_blocks in the StateService. + // Uses pending_utxos and non_finalized_state_queued_blocks in the StateService. // If the UTXO isn't in the queued blocks, runs concurrently using the ReadStateService. Request::AwaitUtxo(outpoint) => { // Prepare the AwaitUtxo future from PendingUxtos. @@ -1002,7 +1020,7 @@ impl Service for StateService { // Check the non-finalized block queue outside the returned future, // so we can access mutable state fields. - if let Some(utxo) = self.queued_non_finalized_blocks.utxo(&outpoint) { + if let Some(utxo) = self.non_finalized_state_queued_blocks.utxo(&outpoint) { self.pending_utxos.respond(&outpoint, utxo); // We're finished, the returned future gets the UTXO from the respond() channel. @@ -1012,7 +1030,7 @@ impl Service for StateService { } // Check the sent non-finalized blocks - if let Some(utxo) = self.sent_non_finalized_block_hashes.utxo(&outpoint) { + if let Some(utxo) = self.non_finalized_block_write_sent_hashes.utxo(&outpoint) { self.pending_utxos.respond(&outpoint, utxo); // We're finished, the returned future gets the UTXO from the respond() channel. @@ -1021,7 +1039,7 @@ impl Service for StateService { return response_fut; } - // We ignore any UTXOs in FinalizedState.queued_finalized_blocks, + // We ignore any UTXOs in FinalizedState.finalized_state_queued_blocks, // because it is only used during checkpoint verification. // // This creates a rare race condition, but it doesn't seem to happen much in practice. 
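Reviewer note: both commit arms above share the same queue-and-respond shape. The caller immediately gets a `oneshot::Receiver` for the commit result, blocks are parked in a map keyed by their parent hash, and a drain loop repeatedly pops the child of the last block it sent, so writes happen in height order even when blocks arrive out of order. A self-contained sketch of that shape, where `FinalizedQueue` and the `u64` hashes are illustrative stand-ins rather than Zebra's actual types:

```rust
use std::collections::HashMap;

use tokio::sync::oneshot;

// Illustrative stand-in: `u64` plays the role of `block::Hash`.
type Hash = u64;

#[derive(Clone)]
struct Block {
    prev: Hash,
    hash: Hash,
}

/// Blocks wait in a map keyed by their parent hash; the drain loop pops the
/// child of the last block it sent, so the writer sees blocks in height order.
struct FinalizedQueue {
    queued: HashMap<Hash, (Block, oneshot::Sender<Result<Hash, String>>)>,
    last_sent: Hash,
}

impl FinalizedQueue {
    fn queue(&mut self, block: Block) -> oneshot::Receiver<Result<Hash, String>> {
        let (tx, rx) = oneshot::channel();
        // Like the handlers above, a newer duplicate replaces an older
        // queued block with the same parent, failing the older request.
        if let Some((_, old_tx)) = self.queued.insert(block.prev, (block, tx)) {
            let _ = old_tx.send(Err("dropping older block: got newer duplicate".into()));
        }
        rx
    }

    fn drain_in_order(&mut self, mut write: impl FnMut(&Block)) {
        while let Some((block, tx)) = self.queued.remove(&self.last_sent) {
            self.last_sent = block.hash;
            write(&block);
            let _ = tx.send(Ok(block.hash));
        }
    }
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let mut queue = FinalizedQueue { queued: HashMap::new(), last_sent: 0 };

    // Queue the child before its parent: the drain still writes in order.
    let rx2 = queue.queue(Block { prev: 10, hash: 20 });
    let rx1 = queue.queue(Block { prev: 0, hash: 10 });

    queue.drain_in_order(|block| println!("writing block {}", block.hash));

    assert_eq!(rx1.await.unwrap(), Ok(10));
    assert_eq!(rx2.await.unwrap(), Ok(20));
}
```

In the real service the result is sent by the block write task after the database commit; the drain loop answers directly here only to keep the sketch self-contained.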
@@ -1143,19 +1161,17 @@ impl Service for ReadStateService { fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { // Check for panics in the block write task + // + // TODO: move into a check_for_panics() method let block_write_task = self.block_write_task.take(); if let Some(block_write_task) = block_write_task { if block_write_task.is_finished() { - match Arc::try_unwrap(block_write_task) { + if let Some(block_write_task) = Arc::into_inner(block_write_task) { // We are the last state with a reference to this task, so we can propagate any panics - Ok(block_write_task_handle) => { - if let Err(thread_panic) = block_write_task_handle.join() { - std::panic::resume_unwind(thread_panic); - } + if let Err(thread_panic) = block_write_task.join() { + std::panic::resume_unwind(thread_panic); } - // We're not the last state, so we need to put it back - Err(arc_block_write_task) => self.block_write_task = Some(arc_block_write_task), } } else { // It hasn't finished, so we need to put it back @@ -1163,6 +1179,8 @@ impl Service for ReadStateService { } } + self.db.check_for_panics(); + Poll::Ready(Ok(())) } @@ -1191,8 +1209,7 @@ impl Service for ReadStateService { Ok(ReadResponse::Tip(tip)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::Tip")) - .boxed() + .wait_for_panics() } // Used by the StateService. @@ -1213,8 +1230,7 @@ impl Service for ReadStateService { Ok(ReadResponse::Depth(depth)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::Depth")) - .boxed() + .wait_for_panics() } // Used by the StateService. @@ -1237,10 +1253,7 @@ impl Service for ReadStateService { Ok(ReadResponse::BestChainNextMedianTimePast(median_time_past?)) }) }) - .map(|join_result| { - join_result.expect("panic in ReadRequest::BestChainNextMedianTimePast") - }) - .boxed() + .wait_for_panics() } // Used by the get_block (raw) RPC and the StateService. @@ -1265,8 +1278,7 @@ impl Service for ReadStateService { Ok(ReadResponse::Block(block)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::Block")) - .boxed() + .wait_for_panics() } // For the get_raw_transaction RPC and the StateService. @@ -1284,8 +1296,7 @@ impl Service for ReadStateService { Ok(ReadResponse::Transaction(response)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::Transaction")) - .boxed() + .wait_for_panics() } // Used by the getblock (verbose) RPC. @@ -1314,10 +1325,7 @@ impl Service for ReadStateService { Ok(ReadResponse::TransactionIdsForBlock(transaction_ids)) }) }) - .map(|join_result| { - join_result.expect("panic in ReadRequest::TransactionIdsForBlock") - }) - .boxed() + .wait_for_panics() } ReadRequest::UnspentBestChainUtxo(outpoint) => { @@ -1341,8 +1349,7 @@ impl Service for ReadStateService { Ok(ReadResponse::UnspentBestChainUtxo(utxo)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::UnspentBestChainUtxo")) - .boxed() + .wait_for_panics() } // Manually used by the StateService to implement part of AwaitUtxo. @@ -1363,8 +1370,7 @@ impl Service for ReadStateService { Ok(ReadResponse::AnyChainUtxo(utxo)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::AnyChainUtxo")) - .boxed() + .wait_for_panics() } // Used by the StateService. @@ -1387,8 +1393,7 @@ impl Service for ReadStateService { )) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::BlockLocator")) - .boxed() + .wait_for_panics() } // Used by the StateService. 
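Reviewer note: the repeated `.map(|join_result| join_result.expect("panic in ...")).boxed()` endings above are all replaced by a single `wait_for_panics()` call. The helper's real definition lives elsewhere in Zebra's diagnostics code; the sketch below is an assumed reconstruction of an extension trait with that behaviour, which unwraps the `JoinHandle` result and resumes any task panic on the caller's side instead of relying on a hand-written `expect` message at every call site:

```rust
use futures::{future::BoxFuture, FutureExt};
use tokio::task::JoinHandle;

/// Assumed shape of a `wait_for_panics()` helper; Zebra's real helper may
/// differ in signature and location.
trait WaitForPanics {
    type Output;

    /// Waits for the task, unwrapping its `JoinError` by resuming any task
    /// panic on the caller's side.
    fn wait_for_panics(self) -> BoxFuture<'static, Self::Output>;
}

impl<T: Send + 'static> WaitForPanics for JoinHandle<T> {
    type Output = T;

    fn wait_for_panics(self) -> BoxFuture<'static, T> {
        self.map(|join_result| match join_result {
            Ok(output) => output,
            // `into_panic` assumes the task panicked rather than being
            // cancelled; a production helper would handle cancellation too.
            Err(join_error) => std::panic::resume_unwind(join_error.into_panic()),
        })
        .boxed()
    }
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let answer = tokio::task::spawn_blocking(|| 6 * 7).wait_for_panics().await;
    assert_eq!(answer, 42);
}
```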
@@ -1415,8 +1420,7 @@ impl Service for ReadStateService { Ok(ReadResponse::BlockHashes(block_hashes)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::FindBlockHashes")) - .boxed() + .wait_for_panics() } // Used by the StateService. @@ -1448,8 +1452,7 @@ impl Service for ReadStateService { Ok(ReadResponse::BlockHeaders(block_headers)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::FindBlockHeaders")) - .boxed() + .wait_for_panics() } ReadRequest::SaplingTree(hash_or_height) => { @@ -1473,8 +1476,7 @@ impl Service for ReadStateService { Ok(ReadResponse::SaplingTree(sapling_tree)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::SaplingTree")) - .boxed() + .wait_for_panics() } ReadRequest::OrchardTree(hash_or_height) => { @@ -1498,8 +1500,7 @@ impl Service for ReadStateService { Ok(ReadResponse::OrchardTree(orchard_tree)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::OrchardTree")) - .boxed() + .wait_for_panics() } // For the get_address_balance RPC. @@ -1524,8 +1525,7 @@ impl Service for ReadStateService { Ok(ReadResponse::AddressBalance(balance)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::AddressBalance")) - .boxed() + .wait_for_panics() } // For the get_address_tx_ids RPC. @@ -1558,10 +1558,7 @@ impl Service for ReadStateService { tx_ids.map(ReadResponse::AddressesTransactionIds) }) }) - .map(|join_result| { - join_result.expect("panic in ReadRequest::TransactionIdsByAddresses") - }) - .boxed() + .wait_for_panics() } // For the get_address_utxos RPC. @@ -1587,8 +1584,7 @@ impl Service for ReadStateService { utxos.map(ReadResponse::AddressUtxos) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::UtxosByAddresses")) - .boxed() + .wait_for_panics() } ReadRequest::CheckBestChainTipNullifiersAndAnchors(unmined_tx) => { @@ -1621,11 +1617,7 @@ impl Service for ReadStateService { Ok(ReadResponse::ValidBestChainTipNullifiersAndAnchors) }) }) - .map(|join_result| { - join_result - .expect("panic in ReadRequest::CheckBestChainTipNullifiersAndAnchors") - }) - .boxed() + .wait_for_panics() } // Used by the get_block and get_block_hash RPCs. @@ -1654,8 +1646,7 @@ impl Service for ReadStateService { Ok(ReadResponse::BlockHash(hash)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::BestChainBlockHash")) - .boxed() + .wait_for_panics() } // Used by get_block_template RPC. @@ -1694,8 +1685,7 @@ impl Service for ReadStateService { get_block_template_info.map(ReadResponse::ChainInfo) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::ChainInfo")) - .boxed() + .wait_for_panics() } // Used by getmininginfo, getnetworksolps, and getnetworkhashps RPCs. 
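Reviewer note: all of the converted read handlers above also share a second pattern worth calling out. Each clones a cheap shared handle, runs the database read inside `tokio::task::spawn_blocking` so RocksDB I/O never stalls the async executor, and re-enters the request's `tracing` span on the blocking thread. A reduced sketch, where the `ReadHandle` type and its `tip_height` method are invented for illustration:

```rust
use futures::{future::BoxFuture, FutureExt};
use tracing::Span;

/// Invented stand-in for the shared read-only database handle.
#[derive(Clone)]
struct ReadHandle;

impl ReadHandle {
    fn tip_height(&self) -> u32 {
        // Stand-in for a RocksDB read.
        42
    }
}

/// Answer a read request without blocking the async executor.
fn read_tip_height(handle: &ReadHandle, span: Span) -> BoxFuture<'static, u32> {
    let handle = handle.clone();

    tokio::task::spawn_blocking(move || {
        // Re-enter the request's span so logs emitted by the read are
        // attributed to the request that triggered it.
        span.in_scope(move || handle.tip_height())
    })
    // The hunks above replace this closing boilerplate with `.wait_for_panics()`.
    .map(|join_result| join_result.expect("panic in blocking read task"))
    .boxed()
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let handle = ReadHandle;
    let height = read_tip_height(&handle, Span::current()).await;
    println!("tip height: {height}");
}
```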
@@ -1748,12 +1738,11 @@ impl Service for ReadStateService { Ok(ReadResponse::SolutionRate(solution_rate)) }) }) - .map(|join_result| join_result.expect("panic in ReadRequest::SolutionRate")) - .boxed() + .wait_for_panics() } #[cfg(feature = "getblocktemplate-rpcs")] - ReadRequest::CheckBlockProposalValidity(prepared) => { + ReadRequest::CheckBlockProposalValidity(semantically_verified) => { let state = self.clone(); // # Performance @@ -1762,7 +1751,7 @@ impl Service for ReadStateService { tokio::task::spawn_blocking(move || { span.in_scope(move || { - tracing::info!("attempting to validate and commit block proposal onto a cloned non-finalized state"); + tracing::debug!("attempting to validate and commit block proposal onto a cloned non-finalized state"); let mut latest_non_finalized_state = state.latest_non_finalized_state(); // The previous block of a valid proposal must be on the best chain tip. @@ -1770,7 +1759,7 @@ impl Service for ReadStateService { return Err("state is empty: wait for Zebra to sync before submitting a proposal".into()); }; - if prepared.block.header.previous_block_hash != best_tip_hash { + if semantically_verified.block.header.previous_block_hash != best_tip_hash { return Err("proposal is not based on the current best chain tip: previous block hash must be the best chain tip".into()); } @@ -1778,13 +1767,13 @@ impl Service for ReadStateService { // The non-finalized state that's used in the rest of the state (including finalizing // blocks into the db) is not mutated here. // - // TODO: Convert `CommitBlockError` to a new `ValidateProposalError`? + // TODO: Convert `CommitSemanticallyVerifiedError` to a new `ValidateProposalError`? latest_non_finalized_state.disable_metrics(); write::validate_and_commit_non_finalized( &state.db, &mut latest_non_finalized_state, - prepared, + semantically_verified, )?; // The work is done in the future. @@ -1797,10 +1786,7 @@ impl Service for ReadStateService { Ok(ReadResponse::ValidBlockProposal) }) }) - .map(|join_result| { - join_result.expect("panic in ReadRequest::CheckBlockProposalValidity") - }) - .boxed() + .wait_for_panics() } } } diff --git a/zebra-state/src/service/arbitrary.rs b/zebra-state/src/service/arbitrary.rs index accc9db7a2d..f6185617c59 100644 --- a/zebra-state/src/service/arbitrary.rs +++ b/zebra-state/src/service/arbitrary.rs @@ -23,7 +23,7 @@ use zebra_chain::{ use crate::{ arbitrary::Prepare, service::{check, ReadStateService, StateService}, - BoxError, ChainTipChange, Config, LatestChainTip, PreparedBlock, Request, Response, + BoxError, ChainTipChange, Config, LatestChainTip, Request, Response, SemanticallyVerifiedBlock, }; pub use zebra_chain::block::arbitrary::MAX_PARTIAL_CHAIN_BLOCKS; @@ -33,7 +33,7 @@ pub const CHAIN_TIP_UPDATE_WAIT_LIMIT: Duration = Duration::from_secs(2); #[derive(Debug)] pub struct PreparedChainTree { - chain: Arc>>, + chain: Arc>>, count: BinarySearch, network: Network, history_tree: Arc, @@ -41,7 +41,7 @@ pub struct PreparedChainTree { impl ValueTree for PreparedChainTree { type Value = ( - Arc>>, + Arc>>, ::Value, Network, Arc, @@ -71,7 +71,7 @@ pub struct PreparedChain { chain: std::sync::Mutex< Option<( Network, - Arc>>, + Arc>>, Arc, )>, >, @@ -173,7 +173,10 @@ impl Strategy for PreparedChain { } let chain = chain.clone().expect("should be generated"); - let count = (1..chain.1.len()).new_tree(runner)?; + // The generated chain should contain at least two blocks: + // 1. the zeroth genesis block, and + // 2. a first block. 
+ let count = (2..chain.1.len()).new_tree(runner)?; Ok(PreparedChainTree { chain: chain.1, count, @@ -199,7 +202,7 @@ pub async fn populated_state( ) { let requests = blocks .into_iter() - .map(|block| Request::CommitFinalizedBlock(block.into())); + .map(|block| Request::CommitCheckpointVerifiedBlock(block.into())); // TODO: write a test that checks the finalized to non-finalized transition with UTXOs, // and set max_checkpoint_height and checkpoint_verify_concurrency_limit correctly. diff --git a/zebra-state/src/service/chain_tip.rs b/zebra-state/src/service/chain_tip.rs index 80675609aae..76f57bfabe7 100644 --- a/zebra-state/src/service/chain_tip.rs +++ b/zebra-state/src/service/chain_tip.rs @@ -20,7 +20,8 @@ use zebra_chain::{ }; use crate::{ - request::ContextuallyValidBlock, service::watch_receiver::WatchReceiver, FinalizedBlock, + request::ContextuallyVerifiedBlock, service::watch_receiver::WatchReceiver, + CheckpointVerifiedBlock, SemanticallyVerifiedBlock, }; use TipAction::*; @@ -85,9 +86,9 @@ impl fmt::Display for ChainTipBlock { } } -impl From for ChainTipBlock { - fn from(contextually_valid: ContextuallyValidBlock) -> Self { - let ContextuallyValidBlock { +impl From for ChainTipBlock { + fn from(contextually_valid: ContextuallyVerifiedBlock) -> Self { + let ContextuallyVerifiedBlock { block, hash, height, @@ -106,15 +107,15 @@ impl From for ChainTipBlock { } } -impl From for ChainTipBlock { - fn from(finalized: FinalizedBlock) -> Self { - let FinalizedBlock { +impl From for ChainTipBlock { + fn from(finalized: CheckpointVerifiedBlock) -> Self { + let CheckpointVerifiedBlock(SemanticallyVerifiedBlock { block, hash, height, transaction_hashes, .. - } = finalized; + }) = finalized; Self { hash, diff --git a/zebra-state/src/service/check.rs b/zebra-state/src/service/check.rs index d9db02c154b..bd8dd8b8648 100644 --- a/zebra-state/src/service/check.rs +++ b/zebra-state/src/service/check.rs @@ -16,7 +16,7 @@ use crate::{ block_iter::any_ancestor_blocks, check::difficulty::POW_ADJUSTMENT_BLOCK_SPAN, finalized_state::ZebraDb, non_finalized_state::NonFinalizedState, }, - BoxError, PreparedBlock, ValidateContextError, + BoxError, SemanticallyVerifiedBlock, ValidateContextError, }; // use self as check @@ -38,8 +38,8 @@ mod tests; pub(crate) use difficulty::AdjustedDifficulty; -/// Check that the `prepared` block is contextually valid for `network`, based -/// on the `finalized_tip_height` and `relevant_chain`. +/// Check that the semantically verified block is contextually valid for `network`, +/// based on the `finalized_tip_height` and `relevant_chain`. /// /// This function performs checks that require a small number of recent blocks, /// including previous hash, previous height, and block difficulty. @@ -50,9 +50,9 @@ pub(crate) use difficulty::AdjustedDifficulty; /// # Panics /// /// If the state contains less than 28 ([`POW_ADJUSTMENT_BLOCK_SPAN`]) blocks. 
-#[tracing::instrument(skip(prepared, finalized_tip_height, relevant_chain))] +#[tracing::instrument(skip(semantically_verified, finalized_tip_height, relevant_chain))] pub(crate) fn block_is_valid_for_recent_chain( - prepared: &PreparedBlock, + semantically_verified: &SemanticallyVerifiedBlock, network: Network, finalized_tip_height: Option, relevant_chain: C, @@ -64,7 +64,7 @@ where { let finalized_tip_height = finalized_tip_height .expect("finalized state must contain at least one block to do contextual validation"); - check::block_is_not_orphaned(finalized_tip_height, prepared.height)?; + check::block_is_not_orphaned(finalized_tip_height, semantically_verified.height)?; let relevant_chain: Vec<_> = relevant_chain .into_iter() @@ -78,21 +78,27 @@ where let parent_height = parent_block .coinbase_height() .expect("valid blocks have a coinbase height"); - check::height_one_more_than_parent_height(parent_height, prepared.height)?; + check::height_one_more_than_parent_height(parent_height, semantically_verified.height)?; - // skip this check during tests if we don't have enough blocks in the chain - #[cfg(test)] if relevant_chain.len() < POW_ADJUSTMENT_BLOCK_SPAN { + // skip this check during tests if we don't have enough blocks in the chain + // process_queued also checks the chain length, so we can skip this assertion during testing + // (tests that want to check this code should use the correct number of blocks) + // + // TODO: accept a NotReadyToBeCommitted error in those tests instead + #[cfg(test)] return Ok(()); + + // In production, blocks without enough context are invalid. + // + // The BlockVerifierRouter makes sure that the first 1 million blocks (or more) are + // checkpoint verified. The state queues and block write task make sure that blocks are + // committed in strict height order. But this function is only called on semantically + // verified blocks, so there will be at least 1 million blocks in the state when it is + // called. So this error should never happen. 
+ #[cfg(not(test))] + return Err(ValidateContextError::NotReadyToBeCommitted); } - // process_queued also checks the chain length, so we can skip this assertion during testing - // (tests that want to check this code should use the correct number of blocks) - assert_eq!( - relevant_chain.len(), - POW_ADJUSTMENT_BLOCK_SPAN, - "state must contain enough blocks to do proof of work contextual validation, \ - and validation must receive the exact number of required blocks" - ); let relevant_data = relevant_chain.iter().map(|block| { ( @@ -101,9 +107,9 @@ where ) }); let difficulty_adjustment = - AdjustedDifficulty::new_from_block(&prepared.block, network, relevant_data); + AdjustedDifficulty::new_from_block(&semantically_verified.block, network, relevant_data); check::difficulty_threshold_and_time_are_valid( - prepared.block.header.difficulty_threshold, + semantically_verified.block.header.difficulty_threshold, difficulty_adjustment, )?; @@ -369,23 +375,23 @@ where pub(crate) fn initial_contextual_validity( finalized_state: &ZebraDb, non_finalized_state: &NonFinalizedState, - prepared: &PreparedBlock, + semantically_verified: &SemanticallyVerifiedBlock, ) -> Result<(), ValidateContextError> { let relevant_chain = any_ancestor_blocks( non_finalized_state, finalized_state, - prepared.block.header.previous_block_hash, + semantically_verified.block.header.previous_block_hash, ); // Security: check proof of work before any other checks check::block_is_valid_for_recent_chain( - prepared, + semantically_verified, non_finalized_state.network, finalized_state.finalized_tip_height(), relevant_chain, )?; - check::nullifier::no_duplicates_in_finalized_chain(prepared, finalized_state)?; + check::nullifier::no_duplicates_in_finalized_chain(semantically_verified, finalized_state)?; Ok(()) } diff --git a/zebra-state/src/service/check/anchors.rs b/zebra-state/src/service/check/anchors.rs index a2467693a3e..5f6ee293e34 100644 --- a/zebra-state/src/service/check/anchors.rs +++ b/zebra-state/src/service/check/anchors.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::{ service::{finalized_state::ZebraDb, non_finalized_state::Chain}, - PreparedBlock, ValidateContextError, + SemanticallyVerifiedBlock, ValidateContextError, }; /// Checks the final Sapling and Orchard anchors specified by `transaction` @@ -152,7 +152,7 @@ fn fetch_sprout_final_treestates( let input_tree = parent_chain .and_then(|chain| chain.sprout_trees_by_anchor.get(&joinsplit.anchor).cloned()) - .or_else(|| finalized_state.sprout_note_commitment_tree_by_anchor(&joinsplit.anchor)); + .or_else(|| finalized_state.sprout_tree_by_anchor(&joinsplit.anchor)); if let Some(input_tree) = input_tree { sprout_final_treestates.insert(joinsplit.anchor, input_tree); @@ -190,7 +190,7 @@ fn fetch_sprout_final_treestates( /// treestate of any prior `JoinSplit` _within the same transaction_. /// /// This method searches for anchors in the supplied `sprout_final_treestates` -/// (which must be populated with all treestates pointed to in the `prepared` block; +/// (which must be populated with all treestates pointed to in the `semantically_verified` block; /// see [`fetch_sprout_final_treestates()`]); or in the interstitial /// treestates which are computed on the fly in this function. #[tracing::instrument(skip(sprout_final_treestates, transaction))] @@ -312,9 +312,9 @@ fn sprout_anchors_refer_to_treestates( Ok(()) } -/// Accepts a [`ZebraDb`], [`Chain`], and [`PreparedBlock`]. +/// Accepts a [`ZebraDb`], [`Chain`], and [`SemanticallyVerifiedBlock`]. 
/// -/// Iterates over the transactions in the [`PreparedBlock`] checking the final Sapling and Orchard anchors. +/// Iterates over the transactions in the [`SemanticallyVerifiedBlock`] checking the final Sapling and Orchard anchors. /// /// This method checks for anchors computed from the final treestate of each block in /// the `parent_chain` or `finalized_state`. @@ -322,25 +322,28 @@ fn sprout_anchors_refer_to_treestates( pub(crate) fn block_sapling_orchard_anchors_refer_to_final_treestates( finalized_state: &ZebraDb, parent_chain: &Arc, - prepared: &PreparedBlock, + semantically_verified: &SemanticallyVerifiedBlock, ) -> Result<(), ValidateContextError> { - prepared.block.transactions.iter().enumerate().try_for_each( - |(tx_index_in_block, transaction)| { + semantically_verified + .block + .transactions + .iter() + .enumerate() + .try_for_each(|(tx_index_in_block, transaction)| { sapling_orchard_anchors_refer_to_final_treestates( finalized_state, Some(parent_chain), transaction, - prepared.transaction_hashes[tx_index_in_block], + semantically_verified.transaction_hashes[tx_index_in_block], Some(tx_index_in_block), - Some(prepared.height), + Some(semantically_verified.height), ) - }, - ) + }) } -/// Accepts a [`ZebraDb`], [`Arc`](Chain), and [`PreparedBlock`]. +/// Accepts a [`ZebraDb`], [`Arc`](Chain), and [`SemanticallyVerifiedBlock`]. /// -/// Iterates over the transactions in the [`PreparedBlock`], and fetches the Sprout final treestates +/// Iterates over the transactions in the [`SemanticallyVerifiedBlock`], and fetches the Sprout final treestates /// from the state. /// /// Returns a `HashMap` of the Sprout final treestates from the state for [`sprout_anchors_refer_to_treestates()`] @@ -353,18 +356,20 @@ pub(crate) fn block_sapling_orchard_anchors_refer_to_final_treestates( pub(crate) fn block_fetch_sprout_final_treestates( finalized_state: &ZebraDb, parent_chain: &Arc, - prepared: &PreparedBlock, + semantically_verified: &SemanticallyVerifiedBlock, ) -> HashMap> { let mut sprout_final_treestates = HashMap::new(); - for (tx_index_in_block, transaction) in prepared.block.transactions.iter().enumerate() { + for (tx_index_in_block, transaction) in + semantically_verified.block.transactions.iter().enumerate() + { fetch_sprout_final_treestates( &mut sprout_final_treestates, finalized_state, Some(parent_chain), transaction, Some(tx_index_in_block), - Some(prepared.height), + Some(semantically_verified.height), ); } @@ -381,7 +386,7 @@ pub(crate) fn block_fetch_sprout_final_treestates( /// treestate of any prior `JoinSplit` _within the same transaction_. /// /// This method searches for anchors in the supplied `sprout_final_treestates` -/// (which must be populated with all treestates pointed to in the `prepared` block; +/// (which must be populated with all treestates pointed to in the `semantically_verified` block; /// see [`fetch_sprout_final_treestates()`]); or in the interstitial /// treestates which are computed on the fly in this function. 
#[tracing::instrument(skip(sprout_final_treestates, block, transaction_hashes))] diff --git a/zebra-state/src/service/check/nullifier.rs b/zebra-state/src/service/check/nullifier.rs index f3ea6853fe5..809e78383ba 100644 --- a/zebra-state/src/service/check/nullifier.rs +++ b/zebra-state/src/service/check/nullifier.rs @@ -8,7 +8,7 @@ use zebra_chain::transaction::Transaction; use crate::{ error::DuplicateNullifierError, service::{finalized_state::ZebraDb, non_finalized_state::Chain}, - PreparedBlock, ValidateContextError, + SemanticallyVerifiedBlock, ValidateContextError, }; // Tidy up some doc links @@ -16,7 +16,7 @@ use crate::{ use crate::service; /// Reject double-spends of nullifers: -/// - one from this [`PreparedBlock`], and the other already committed to the +/// - one from this [`SemanticallyVerifiedBlock`], and the other already committed to the /// [`FinalizedState`](service::FinalizedState). /// /// (Duplicate non-finalized nullifiers are rejected during the chain update, @@ -30,24 +30,24 @@ use crate::service; /// > even if they have the same bit pattern. /// /// -#[tracing::instrument(skip(prepared, finalized_state))] +#[tracing::instrument(skip(semantically_verified, finalized_state))] pub(crate) fn no_duplicates_in_finalized_chain( - prepared: &PreparedBlock, + semantically_verified: &SemanticallyVerifiedBlock, finalized_state: &ZebraDb, ) -> Result<(), ValidateContextError> { - for nullifier in prepared.block.sprout_nullifiers() { + for nullifier in semantically_verified.block.sprout_nullifiers() { if finalized_state.contains_sprout_nullifier(nullifier) { Err(nullifier.duplicate_nullifier_error(true))?; } } - for nullifier in prepared.block.sapling_nullifiers() { + for nullifier in semantically_verified.block.sapling_nullifiers() { if finalized_state.contains_sapling_nullifier(nullifier) { Err(nullifier.duplicate_nullifier_error(true))?; } } - for nullifier in prepared.block.orchard_nullifiers() { + for nullifier in semantically_verified.block.orchard_nullifiers() { if finalized_state.contains_orchard_nullifier(nullifier) { Err(nullifier.duplicate_nullifier_error(true))?; } diff --git a/zebra-state/src/service/check/tests/anchors.rs b/zebra-state/src/service/check/tests/anchors.rs index 11564201e12..d96c8b0410b 100644 --- a/zebra-state/src/service/check/tests/anchors.rs +++ b/zebra-state/src/service/check/tests/anchors.rs @@ -18,7 +18,7 @@ use crate::{ write::validate_and_commit_non_finalized, }, tests::setup::{new_state_with_mainnet_genesis, transaction_v4_from_coinbase}, - PreparedBlock, ValidateContextError, + SemanticallyVerifiedBlock, ValidateContextError, }; // Sprout @@ -105,7 +105,10 @@ fn check_sprout_anchors() { ); } -fn prepare_sprout_block(mut block_to_prepare: Block, reference_block: Block) -> PreparedBlock { +fn prepare_sprout_block( + mut block_to_prepare: Block, + reference_block: Block, +) -> SemanticallyVerifiedBlock { // Convert the coinbase transaction to a version that the non-finalized state will accept. 
block_to_prepare.transactions[0] = transaction_v4_from_coinbase(&block_to_prepare.transactions[0]).into(); diff --git a/zebra-state/src/service/check/tests/nullifier.rs b/zebra-state/src/service/check/tests/nullifier.rs index 2cfa81c3b08..1a944d017ee 100644 --- a/zebra-state/src/service/check/tests/nullifier.rs +++ b/zebra-state/src/service/check/tests/nullifier.rs @@ -23,7 +23,7 @@ use crate::{ check::nullifier::tx_no_duplicates_in_chain, read, write::validate_and_commit_non_finalized, }, tests::setup::{new_state_with_mainnet_genesis, transaction_v4_from_coinbase}, - FinalizedBlock, + CheckpointVerifiedBlock, ValidateContextError::{ DuplicateOrchardNullifier, DuplicateSaplingNullifier, DuplicateSproutNullifier, }, @@ -84,8 +84,8 @@ proptest! { // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); // the block was committed prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -351,8 +351,8 @@ proptest! { let block1_hash; // randomly choose to commit the next block to the finalized or non-finalized state if duplicate_in_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); prop_assert!(commit_result.is_ok()); @@ -451,8 +451,8 @@ proptest! { // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(),None, "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); prop_assert!(commit_result.is_ok()); @@ -633,8 +633,8 @@ proptest! { let block1_hash; // randomly choose to commit the next block to the finalized or non-finalized state if duplicate_in_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(),None, "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); prop_assert!(commit_result.is_ok()); @@ -731,8 +731,8 @@ proptest! 
{ // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); prop_assert!(commit_result.is_ok()); @@ -922,8 +922,8 @@ proptest! { let block1_hash; // randomly choose to commit the next block to the finalized or non-finalized state if duplicate_in_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); prop_assert!(commit_result.is_ok()); diff --git a/zebra-state/src/service/check/tests/utxo.rs b/zebra-state/src/service/check/tests/utxo.rs index d35441381e5..acdc2d399a7 100644 --- a/zebra-state/src/service/check/tests/utxo.rs +++ b/zebra-state/src/service/check/tests/utxo.rs @@ -21,7 +21,7 @@ use crate::{ write::validate_and_commit_non_finalized, }, tests::setup::{new_state_with_mainnet_genesis, transaction_v4_from_coinbase}, - FinalizedBlock, + CheckpointVerifiedBlock, ValidateContextError::{ DuplicateTransparentSpend, EarlyTransparentSpend, ImmatureTransparentCoinbaseSpend, MissingTransparentOutput, UnshieldedTransparentCoinbaseSpend, @@ -184,8 +184,8 @@ proptest! { // randomly choose to commit the block to the finalized or non-finalized state if use_finalized_state { - let block1 = FinalizedBlock::from(Arc::new(block1)); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let block1 = CheckpointVerifiedBlock::from(Arc::new(block1)); + let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); // the block was committed prop_assert_eq!(Some((Height(1), block1.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -272,8 +272,8 @@ proptest! { block2.transactions.push(spend_transaction.into()); if use_finalized_state_spend { - let block2 = FinalizedBlock::from(Arc::new(block2)); - let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(), "test"); + let block2 = CheckpointVerifiedBlock::from(Arc::new(block2)); + let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(),None, "test"); // the block was committed prop_assert_eq!(Some((Height(2), block2.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -611,8 +611,8 @@ proptest! 
{ let block2 = Arc::new(block2); if use_finalized_state_spend { - let block2 = FinalizedBlock::from(block2.clone()); - let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(), "test"); + let block2 = CheckpointVerifiedBlock::from(block2.clone()); + let commit_result = finalized_state.commit_finalized_direct(block2.clone().into(), None, "test"); // the block was committed prop_assert_eq!(Some((Height(2), block2.hash)), read::best_tip(&non_finalized_state, &finalized_state.db)); @@ -842,7 +842,7 @@ struct TestState { /// The genesis block that has already been committed to the `state` service's /// finalized state. #[allow(dead_code)] - genesis: FinalizedBlock, + genesis: CheckpointVerifiedBlock, /// A block at height 1, that has already been committed to the `state` service. block1: Arc, @@ -883,8 +883,9 @@ fn new_state_with_mainnet_transparent_data( let block1 = Arc::new(block1); if use_finalized_state { - let block1 = FinalizedBlock::from(block1.clone()); - let commit_result = finalized_state.commit_finalized_direct(block1.clone().into(), "test"); + let block1 = CheckpointVerifiedBlock::from(block1.clone()); + let commit_result = + finalized_state.commit_finalized_direct(block1.clone().into(), None, "test"); // the block was committed assert_eq!( diff --git a/zebra-state/src/service/check/utxo.rs b/zebra-state/src/service/check/utxo.rs index 22001923702..186f89d83af 100644 --- a/zebra-state/src/service/check/utxo.rs +++ b/zebra-state/src/service/check/utxo.rs @@ -10,14 +10,14 @@ use zebra_chain::{ use crate::{ constants::MIN_TRANSPARENT_COINBASE_MATURITY, service::finalized_state::ZebraDb, - PreparedBlock, + SemanticallyVerifiedBlock, ValidateContextError::{ self, DuplicateTransparentSpend, EarlyTransparentSpend, ImmatureTransparentCoinbaseSpend, MissingTransparentOutput, UnshieldedTransparentCoinbaseSpend, }, }; -/// Lookup all the [`transparent::Utxo`]s spent by a [`PreparedBlock`]. +/// Lookup all the [`transparent::Utxo`]s spent by a [`SemanticallyVerifiedBlock`]. /// If any of the spends are invalid, return an error. /// Otherwise, return the looked up UTXOs. /// @@ -36,14 +36,16 @@ use crate::{ /// - spends of an immature transparent coinbase output, /// - unshielded spends of a transparent coinbase output. pub fn transparent_spend( - prepared: &PreparedBlock, + semantically_verified: &SemanticallyVerifiedBlock, non_finalized_chain_unspent_utxos: &HashMap, non_finalized_chain_spent_utxos: &HashSet, finalized_state: &ZebraDb, ) -> Result, ValidateContextError> { let mut block_spends = HashMap::new(); - for (spend_tx_index_in_block, transaction) in prepared.block.transactions.iter().enumerate() { + for (spend_tx_index_in_block, transaction) in + semantically_verified.block.transactions.iter().enumerate() + { // Coinbase inputs represent new coins, // so there are no UTXOs to mark as spent. let spends = transaction @@ -55,7 +57,7 @@ pub fn transparent_spend( let utxo = transparent_spend_chain_order( spend, spend_tx_index_in_block, - &prepared.new_outputs, + &semantically_verified.new_outputs, non_finalized_chain_unspent_utxos, non_finalized_chain_spent_utxos, finalized_state, @@ -70,7 +72,8 @@ pub fn transparent_spend( // We don't want to use UTXOs from invalid pending blocks, // so we check transparent coinbase maturity and shielding // using known valid UTXOs during non-finalized chain validation. 
- let spend_restriction = transaction.coinbase_spend_restriction(prepared.height); + let spend_restriction = + transaction.coinbase_spend_restriction(semantically_verified.height); transparent_coinbase_spend(spend, spend_restriction, utxo.as_ref())?; // We don't delete the UTXOs until the block is committed, @@ -86,7 +89,7 @@ pub fn transparent_spend( } } - remaining_transaction_value(prepared, &block_spends)?; + remaining_transaction_value(semantically_verified, &block_spends)?; Ok(block_spends) } @@ -225,11 +228,12 @@ pub fn transparent_coinbase_spend( /// /// pub fn remaining_transaction_value( - prepared: &PreparedBlock, + semantically_verified: &SemanticallyVerifiedBlock, utxos: &HashMap, ) -> Result<(), ValidateContextError> { - for (tx_index_in_block, transaction) in prepared.block.transactions.iter().enumerate() { - // TODO: check coinbase transaction remaining value (#338, #1162) + for (tx_index_in_block, transaction) in + semantically_verified.block.transactions.iter().enumerate() + { if transaction.is_coinbase() { continue; } @@ -244,26 +248,28 @@ pub fn remaining_transaction_value( { Err(ValidateContextError::NegativeRemainingTransactionValue { amount_error, - height: prepared.height, + height: semantically_verified.height, tx_index_in_block, - transaction_hash: prepared.transaction_hashes[tx_index_in_block], + transaction_hash: semantically_verified.transaction_hashes + [tx_index_in_block], }) } Err(amount_error) => { Err(ValidateContextError::CalculateRemainingTransactionValue { amount_error, - height: prepared.height, + height: semantically_verified.height, tx_index_in_block, - transaction_hash: prepared.transaction_hashes[tx_index_in_block], + transaction_hash: semantically_verified.transaction_hashes + [tx_index_in_block], }) } }, Err(value_balance_error) => { Err(ValidateContextError::CalculateTransactionValueBalances { value_balance_error, - height: prepared.height, + height: semantically_verified.height, tx_index_in_block, - transaction_hash: prepared.transaction_hashes[tx_index_in_block], + transaction_hash: semantically_verified.transaction_hashes[tx_index_in_block], }) } }? diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index 2a355646f95..702c9e575aa 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -20,12 +20,12 @@ use std::{ sync::Arc, }; -use zebra_chain::{block, parameters::Network}; +use zebra_chain::{block, parallel::tree::NoteCommitmentTrees, parameters::Network}; use crate::{ - request::FinalizedWithTrees, - service::{check, QueuedFinalized}, - BoxError, CloneError, Config, FinalizedBlock, + request::{FinalizableBlock, SemanticallyVerifiedBlockWithTrees, Treestate}, + service::{check, QueuedCheckpointVerified}, + BoxError, CheckpointVerifiedBlock, CloneError, Config, }; mod disk_db; @@ -161,23 +161,27 @@ impl FinalizedState { self.network } - /// Commit a finalized block to the state. + /// Commit a checkpoint-verified block to the state. /// /// It's the caller's responsibility to ensure that blocks are committed in /// order. 
pub fn commit_finalized( &mut self, - ordered_block: QueuedFinalized, - ) -> Result { - let (finalized, rsp_tx) = ordered_block; - let result = - self.commit_finalized_direct(finalized.clone().into(), "CommitFinalized request"); + ordered_block: QueuedCheckpointVerified, + prev_note_commitment_trees: Option, + ) -> Result<(CheckpointVerifiedBlock, NoteCommitmentTrees), BoxError> { + let (checkpoint_verified, rsp_tx) = ordered_block; + let result = self.commit_finalized_direct( + checkpoint_verified.clone().into(), + prev_note_commitment_trees, + "commit checkpoint-verified request", + ); if result.is_ok() { metrics::counter!("state.checkpoint.finalized.block.count", 1); metrics::gauge!( "state.checkpoint.finalized.block.height", - finalized.height.0 as f64, + checkpoint_verified.height.0 as f64, ); // This height gauge is updated for both fully verified and checkpoint blocks. @@ -185,14 +189,14 @@ impl FinalizedState { // are committed in order. metrics::gauge!( "zcash.chain.verified.block.height", - finalized.height.0 as f64, + checkpoint_verified.height.0 as f64, ); metrics::counter!("zcash.chain.verified.block.total", 1); } else { metrics::counter!("state.checkpoint.error.block.count", 1); metrics::gauge!( "state.checkpoint.error.block.height", - finalized.height.0 as f64, + checkpoint_verified.height.0 as f64, ); }; @@ -200,9 +204,11 @@ impl FinalizedState { // and the block write task. let result = result.map_err(CloneError::from); - let _ = rsp_tx.send(result.clone().map_err(BoxError::from)); + let _ = rsp_tx.send(result.clone().map(|(hash, _)| hash).map_err(BoxError::from)); - result.map(|_hash| finalized).map_err(BoxError::from) + result + .map(|(_hash, note_commitment_trees)| (checkpoint_verified, note_commitment_trees)) + .map_err(BoxError::from) } /// Immediately commit a `finalized` block to the finalized state. @@ -221,52 +227,28 @@ impl FinalizedState { #[allow(clippy::unwrap_in_result)] pub fn commit_finalized_direct( &mut self, - finalized_with_trees: FinalizedWithTrees, + finalizable_block: FinalizableBlock, + prev_note_commitment_trees: Option, source: &str, - ) -> Result { - let finalized = finalized_with_trees.finalized; - let committed_tip_hash = self.db.finalized_tip_hash(); - let committed_tip_height = self.db.finalized_tip_height(); - - // Assert that callers (including unit tests) get the chain order correct - if self.db.is_empty() { - assert_eq!( - committed_tip_hash, finalized.block.header.previous_block_hash, - "the first block added to an empty state must be a genesis block, source: {source}", - ); - assert_eq!( - block::Height(0), - finalized.height, - "cannot commit genesis: invalid height, source: {source}", - ); - } else { - assert_eq!( - committed_tip_height.expect("state must have a genesis block committed") + 1, - Some(finalized.height), - "committed block height must be 1 more than the finalized tip height, source: {source}", - ); - - assert_eq!( - committed_tip_hash, finalized.block.header.previous_block_hash, - "committed block must be a child of the finalized tip, source: {source}", - ); - } - - let (history_tree, note_commitment_trees) = match finalized_with_trees.treestate { - // If the treestate associated with the block was supplied, use it - // without recomputing it. - Some(ref treestate) => ( - treestate.history_tree.clone(), - treestate.note_commitment_trees.clone(), - ), - // If the treestate was not supplied, retrieve a previous treestate - // from the database, and update it for the block being committed. 
- None => { + ) -> Result<(block::Hash, NoteCommitmentTrees), BoxError> { + let (height, hash, finalized, prev_note_commitment_trees) = match finalizable_block { + FinalizableBlock::Checkpoint { + checkpoint_verified, + } => { + // Checkpoint-verified blocks don't have an associated treestate, so we retrieve the + // treestate of the finalized tip from the database and update it for the block + // being committed, assuming the retrieved treestate is the parent block's + // treestate. Later on, this function proves this assumption by asserting that the + // finalized tip is the parent block of the block being committed. + + let block = checkpoint_verified.block.clone(); let mut history_tree = self.db.history_tree(); - let mut note_commitment_trees = self.db.note_commitment_trees(); + let prev_note_commitment_trees = + prev_note_commitment_trees.unwrap_or_else(|| self.db.note_commitment_trees()); // Update the note commitment trees. - note_commitment_trees.update_trees_parallel(&finalized.block)?; + let mut note_commitment_trees = prev_note_commitment_trees.clone(); + note_commitment_trees.update_trees_parallel(&block)?; // Check the block commitment if the history tree was not // supplied by the non-finalized state. Note that we don't do @@ -286,7 +268,7 @@ impl FinalizedState { // TODO: run this CPU-intensive cryptography in a parallel rayon // thread, if it shows up in profiles check::block_commitment_is_valid_for_chain_history( - finalized.block.clone(), + block.clone(), self.network, &history_tree, )?; @@ -298,30 +280,69 @@ impl FinalizedState { let history_tree_mut = Arc::make_mut(&mut history_tree); let sapling_root = note_commitment_trees.sapling.root(); let orchard_root = note_commitment_trees.orchard.root(); - history_tree_mut.push( - self.network(), - finalized.block.clone(), - sapling_root, - orchard_root, - )?; - - (history_tree, note_commitment_trees) + history_tree_mut.push(self.network(), block.clone(), sapling_root, orchard_root)?; + + ( + checkpoint_verified.height, + checkpoint_verified.hash, + SemanticallyVerifiedBlockWithTrees { + verified: checkpoint_verified.0, + treestate: Treestate { + note_commitment_trees, + history_tree, + }, + }, + Some(prev_note_commitment_trees), + ) } + FinalizableBlock::Contextual { + contextually_verified, + treestate, + } => ( + contextually_verified.height, + contextually_verified.hash, + SemanticallyVerifiedBlockWithTrees { + verified: contextually_verified.into(), + treestate, + }, + prev_note_commitment_trees, + ), }; - let finalized_height = finalized.height; - let finalized_hash = finalized.hash; + let committed_tip_hash = self.db.finalized_tip_hash(); + let committed_tip_height = self.db.finalized_tip_height(); + + // Assert that callers (including unit tests) get the chain order correct + if self.db.is_empty() { + assert_eq!( + committed_tip_hash, finalized.verified.block.header.previous_block_hash, + "the first block added to an empty state must be a genesis block, source: {source}", + ); + assert_eq!( + block::Height(0), + height, + "cannot commit genesis: invalid height, source: {source}", + ); + } else { + assert_eq!( + committed_tip_height.expect("state must have a genesis block committed") + 1, + Some(height), + "committed block height must be 1 more than the finalized tip height, source: {source}", + ); + + assert_eq!( + committed_tip_hash, finalized.verified.block.header.previous_block_hash, + "committed block must be a child of the finalized tip, source: {source}", + ); + } #[cfg(feature = "elasticsearch")] - let 
finalized_block = finalized.block.clone(); - - let result = self.db.write_block( - finalized, - history_tree, - note_commitment_trees, - self.network, - source, - ); + let finalized_block = finalized.verified.block.clone(); + let note_commitment_trees = finalized.treestate.note_commitment_trees.clone(); + + let result = + self.db + .write_block(finalized, prev_note_commitment_trees, self.network, source); if result.is_ok() { // Save blocks to elasticsearch if the feature is enabled. @@ -329,10 +350,10 @@ self.elasticsearch(&finalized_block); // TODO: move the stop height check to the syncer (#3442) - if self.is_at_stop_height(finalized_height) { + if self.is_at_stop_height(height) { tracing::info!( - height = ?finalized_height, - hash = ?finalized_hash, + ?height, + ?hash, block_source = ?source, "stopping at configured height, flushing database to disk" ); @@ -349,7 +370,7 @@ } } - result + result.map(|hash| (hash, note_commitment_trees)) } #[cfg(feature = "elasticsearch")] @@ -362,12 +383,31 @@ let block_time = block.header.time.timestamp(); let local_time = chrono::Utc::now().timestamp(); - const AWAY_FROM_TIP_BULK_SIZE: usize = 800; + // Mainnet bulk size is small enough to avoid the elasticsearch 100 MB content + // length limitation. MAX_BLOCK_BYTES = 2MB, but each block uses around 4.1 MB of JSON. + // Each block counts as 2 because we send it with an operation/header line. A value of 48 + // is 24 blocks. + const MAINNET_AWAY_FROM_TIP_BULK_SIZE: usize = 48; + + // Testnet bulk size is larger because testnet blocks are generally smaller. + // A value of 800 is 400 blocks, since each block again counts as 2 with its operation line. + const TESTNET_AWAY_FROM_TIP_BULK_SIZE: usize = 800; + + // The number of blocks the bulk will have when we are in sync. + // A value of 2 means only 1 block, because we want to insert blocks as soon as we get + // them for a real-time experience. This is the same for mainnet and testnet. const CLOSE_TO_TIP_BULK_SIZE: usize = 2; + + // We consider the node to be in sync when the difference between local time and block time + // is less than this number of seconds. const CLOSE_TO_TIP_SECONDS: i64 = 14400; // 4 hours - // If we are close to the tip index one block per bulk call. - let mut blocks_size_to_dump = AWAY_FROM_TIP_BULK_SIZE; + let mut blocks_size_to_dump = match self.network { + Network::Mainnet => MAINNET_AWAY_FROM_TIP_BULK_SIZE, + Network::Testnet => TESTNET_AWAY_FROM_TIP_BULK_SIZE, + }; + + // If we are close to the tip, index one block per bulk call. if local_time - block_time < CLOSE_TO_TIP_SECONDS { blocks_size_to_dump = CLOSE_TO_TIP_BULK_SIZE; } @@ -408,12 +448,12 @@ let response_body = response .json::() .await - .expect("ES response parsing to a json_body should never fail"); + .expect("ES response parsing error. Maybe we are sending more than 100 MB of data (`http.max_content_length`)"); let errors = response_body["errors"].as_bool().unwrap_or(true); assert!(!errors, "{}", format!("ES error: {response_body}")); }); - // clean the block storage. + // Clean the block storage. self.elastic_blocks.clear(); } } diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 3c732acc464..b61d6abdf5f 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -7,11 +7,18 @@ //! //! # Correctness //! -//! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must +//!
The [`crate::constants::DATABASE_FORMAT_VERSION`] constants must //! be incremented each time the database format (column, serialization, etc) changes. -use std::{fmt::Debug, path::Path, sync::Arc}; +use std::{ + collections::{BTreeMap, HashMap}, + fmt::Debug, + ops::RangeBounds, + path::Path, + sync::Arc, +}; +use itertools::Itertools; use rlimit::increase_nofile_limit; use zebra_chain::parameters::Network; @@ -141,6 +148,7 @@ impl WriteDisk for DiskWriteBatch { /// defined format // // TODO: just implement these methods directly on DiskDb +// move this trait, its methods, and support methods to another module pub trait ReadDisk { /// Returns true if a rocksdb column family `cf` does not contain any entries. fn zs_is_empty(&self, cf: &C) -> bool @@ -197,6 +205,26 @@ pub trait ReadDisk { C: rocksdb::AsColumnFamilyRef, K: IntoDisk + FromDisk, V: FromDisk; + + /// Returns the keys and values in `cf` in `range`, in an ordered `BTreeMap`. + /// + /// Holding this iterator open might delay block commit transactions. + fn zs_items_in_range_ordered(&self, cf: &C, range: R) -> BTreeMap + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + FromDisk + Ord, + V: FromDisk, + R: RangeBounds; + + /// Returns the keys and values in `cf` in `range`, in an unordered `HashMap`. + /// + /// Holding this iterator open might delay block commit transactions. + fn zs_items_in_range_unordered(&self, cf: &C, range: R) -> HashMap + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + FromDisk + Eq + std::hash::Hash, + V: FromDisk, + R: RangeBounds; } impl PartialEq for DiskDb { @@ -337,6 +365,26 @@ impl ReadDisk for DiskDb { }) .expect("unexpected database failure") } + + fn zs_items_in_range_ordered(&self, cf: &C, range: R) -> BTreeMap + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + FromDisk + Ord, + V: FromDisk, + R: RangeBounds, + { + self.zs_range_iter(cf, range).collect() + } + + fn zs_items_in_range_unordered(&self, cf: &C, range: R) -> HashMap + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + FromDisk + Eq + std::hash::Hash, + V: FromDisk, + R: RangeBounds, + { + self.zs_range_iter(cf, range).collect() + } } impl DiskWriteBatch { @@ -361,6 +409,58 @@ impl DiskWriteBatch { } impl DiskDb { + /// Returns an iterator over the items in `cf` in `range`. + /// + /// Holding this iterator open might delay block commit transactions. 
+ fn zs_range_iter(&self, cf: &C, range: R) -> impl Iterator + '_ + where + C: rocksdb::AsColumnFamilyRef, + K: IntoDisk + FromDisk, + V: FromDisk, + R: RangeBounds, + { + use std::ops::Bound::{self, *}; + + // Replace with map() when it stabilises: + // https://github.com/rust-lang/rust/issues/86026 + let map_to_vec = |bound: Bound<&K>| -> Bound> { + match bound { + Unbounded => Unbounded, + Included(x) => Included(x.as_bytes().as_ref().to_vec()), + Excluded(x) => Excluded(x.as_bytes().as_ref().to_vec()), + } + }; + + let start_bound = map_to_vec(range.start_bound()); + let end_bound = map_to_vec(range.end_bound()); + let range = (start_bound.clone(), end_bound); + + let start_bound_vec = + if let Included(ref start_bound) | Excluded(ref start_bound) = start_bound { + start_bound.clone() + } else { + // Actually unused + Vec::new() + }; + + let start_mode = if matches!(start_bound, Unbounded) { + // Unbounded iterators start at the first item + rocksdb::IteratorMode::Start + } else { + rocksdb::IteratorMode::From(start_bound_vec.as_slice(), rocksdb::Direction::Forward) + }; + + // Reading multiple items from iterators has caused database hangs, + // in previous RocksDB versions + self.db + .iterator_cf(cf, start_mode) + .map(|result| result.expect("unexpected database failure")) + .map(|(key, value)| (key.to_vec(), value)) + // Handle Excluded start and the end bound + .filter(move |(key, _value)| range.contains(key)) + .map(|(key, value)| (K::from_bytes(key), V::from_bytes(value))) + } + /// The ideal open file limit for Zebra const IDEAL_OPEN_FILE_LIMIT: u64 = 1024; @@ -386,61 +486,64 @@ impl DiskDb { /// const MEMTABLE_RAM_CACHE_MEGABYTES: usize = 128; + /// The column families supported by the running database code. + const COLUMN_FAMILIES_IN_CODE: &[&'static str] = &[ + // Blocks + "hash_by_height", + "height_by_hash", + "block_header_by_height", + // Transactions + "tx_by_loc", + "hash_by_tx_loc", + "tx_loc_by_hash", + // Transparent + "balance_by_transparent_addr", + "tx_loc_by_transparent_addr_loc", + "utxo_by_out_loc", + "utxo_loc_by_transparent_addr_loc", + // Sprout + "sprout_nullifiers", + "sprout_anchors", + "sprout_note_commitment_tree", + // Sapling + "sapling_nullifiers", + "sapling_anchors", + "sapling_note_commitment_tree", + // Orchard + "orchard_nullifiers", + "orchard_anchors", + "orchard_note_commitment_tree", + // Chain + "history_tree", + "tip_chain_value_pool", + ]; + /// Opens or creates the database at `config.path` for `network`, /// and returns a shared low-level database wrapper. 
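To make the new range-read API concrete, here is a hypothetical usage sketch that is not part of the diff. It assumes the helper lives inside this module, where `DiskDb` and the `ReadDisk` trait are in scope, and it relies on the `IntoDisk`/`FromDisk` impls that zebra-state already provides for `Height` and `block::Hash`:

```rust
use std::collections::BTreeMap;

use zebra_chain::block::{self, Height};

// Hypothetical helper, for illustration only: collect the hash of every block
// in `start..=end` from the `hash_by_height` column family.
fn hashes_in_range(
    db: &DiskDb,
    hash_by_height: &impl rocksdb::AsColumnFamilyRef,
    start: Height,
    end: Height,
) -> BTreeMap<Height, block::Hash> {
    // The ordered variant collects into a BTreeMap sorted by height;
    // zs_items_in_range_unordered() returns a HashMap when order doesn't matter.
    db.zs_items_in_range_ordered(hash_by_height, start..=end)
}
```

Both collection variants drain the underlying `zs_range_iter()` before returning, so the RocksDB iterator is not held open after the call, which avoids the block-commit delays mentioned in the doc comments.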
pub fn new(config: &Config, network: Network) -> DiskDb { let path = config.db_path(network); + let db_options = DiskDb::options(); - let column_families = vec![ - // Blocks - rocksdb::ColumnFamilyDescriptor::new("hash_by_height", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("height_by_hash", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("block_header_by_height", db_options.clone()), - // Transactions - rocksdb::ColumnFamilyDescriptor::new("tx_by_loc", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("hash_by_tx_loc", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("tx_loc_by_hash", db_options.clone()), - // Transparent - rocksdb::ColumnFamilyDescriptor::new("balance_by_transparent_addr", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new( - "tx_loc_by_transparent_addr_loc", - db_options.clone(), - ), - rocksdb::ColumnFamilyDescriptor::new("utxo_by_out_loc", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new( - "utxo_loc_by_transparent_addr_loc", - db_options.clone(), - ), - // Sprout - rocksdb::ColumnFamilyDescriptor::new("sprout_nullifiers", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("sprout_anchors", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("sprout_note_commitment_tree", db_options.clone()), - // Sapling - rocksdb::ColumnFamilyDescriptor::new("sapling_nullifiers", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("sapling_anchors", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new( - "sapling_note_commitment_tree", - db_options.clone(), - ), - // Orchard - rocksdb::ColumnFamilyDescriptor::new("orchard_nullifiers", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("orchard_anchors", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new( - "orchard_note_commitment_tree", - db_options.clone(), - ), - // Chain - rocksdb::ColumnFamilyDescriptor::new("history_tree", db_options.clone()), - rocksdb::ColumnFamilyDescriptor::new("tip_chain_value_pool", db_options.clone()), - ]; - - // TODO: move opening the database to a blocking thread (#2188) - let db_result = rocksdb::DBWithThreadMode::::open_cf_descriptors( - &db_options, - &path, - column_families, - ); + // When opening the database in read/write mode, all column families must be opened. + // + // To make Zebra forward-compatible with databases updated by later versions, + // we read any existing column families off the disk, then add any new column families + // from the current implementation. + // + // + let column_families_on_disk = DB::list_cf(&db_options, &path).unwrap_or_default(); + let column_families_in_code = Self::COLUMN_FAMILIES_IN_CODE + .iter() + .map(ToString::to_string); + + let column_families = column_families_on_disk + .into_iter() + .chain(column_families_in_code) + .unique() + .map(|cf_name| rocksdb::ColumnFamilyDescriptor::new(cf_name, db_options.clone())); + + let db_result = DB::open_cf_descriptors(&db_options, &path, column_families); match db_result { Ok(db) => { @@ -651,6 +754,19 @@ impl DiskDb { // Cleanup methods + /// Returns the number of shared instances of this database. + /// + /// # Concurrency + /// + /// The actual number of owners can be higher or lower than the returned value, + /// because databases can simultaneously be cloned or dropped in other threads. + /// + /// However, if the number of owners is 1, and the caller has exclusive access, + /// the count can't increase unless that caller clones the database. 
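The counting rule described above is plain `Arc` arithmetic. Here is a minimal standalone sketch (using `Arc<()>`, nothing Zebra-specific) of what the `shared_database_owners()` method defined just below measures:

```rust
use std::sync::Arc;

fn main() {
    // Stands in for the shared RocksDB handle inside DiskDb.
    let db = Arc::new(());
    assert_eq!(Arc::strong_count(&db) + Arc::weak_count(&db), 1);

    // Cloning the database (or taking a weak handle) adds owners.
    let read_handle = db.clone();
    let weak_handle = Arc::downgrade(&db);
    assert_eq!(Arc::strong_count(&db) + Arc::weak_count(&db), 3);

    // Once every other handle is dropped, the count returns to 1,
    // and a shutdown can safely clean up threads and ephemeral data.
    drop(read_handle);
    drop(weak_handle);
    assert_eq!(Arc::strong_count(&db) + Arc::weak_count(&db), 1);
}
```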
+ pub(crate) fn shared_database_owners(&self) -> usize { + Arc::strong_count(&self.db) + Arc::weak_count(&self.db) + } + /// Shut down the database, cleaning up background tasks and ephemeral data. /// /// If `force` is true, clean up regardless of any shared references. @@ -671,9 +787,8 @@ impl DiskDb { // instance. If they do, they must drop it before: // - shutting down database threads, or // - deleting database files. - let shared_database_owners = Arc::strong_count(&self.db) + Arc::weak_count(&self.db); - if shared_database_owners > 1 { + if self.shared_database_owners() > 1 { let path = self.path(); let mut ephemeral_note = ""; @@ -722,18 +837,72 @@ impl DiskDb { let path = self.path(); debug!(?path, "flushing database to disk"); - self.db - .flush() - .expect("unexpected failure flushing SST data to disk"); - self.db - .flush_wal(true) - .expect("unexpected failure flushing WAL data to disk"); + // These flushes can fail during forced shutdown or during Drop after a shutdown, + // particularly in tests. If they fail, there's nothing we can do about it anyway. + if let Err(error) = self.db.flush() { + let error = format!("{error:?}"); + if error.to_ascii_lowercase().contains("shutdown in progress") { + debug!( + ?error, + ?path, + "expected shutdown error flushing database SST files to disk" + ); + } else { + info!( + ?error, + ?path, + "unexpected error flushing database SST files to disk during shutdown" + ); + } + } + + if let Err(error) = self.db.flush_wal(true) { + let error = format!("{error:?}"); + if error.to_ascii_lowercase().contains("shutdown in progress") { + debug!( + ?error, + ?path, + "expected shutdown error flushing database WAL buffer to disk" + ); + } else { + info!( + ?error, + ?path, + "unexpected error flushing database WAL buffer to disk during shutdown" + ); + } + } + // # Memory Safety + // // We'd like to call `cancel_all_background_work()` before Zebra exits, // but when we call it, we get memory, thread, or C++ errors when the process exits. // (This seems to be a bug in RocksDB: cancel_all_background_work() should wait until // all the threads have cleaned up.) // + // # Change History + // + // We've changed this setting multiple times since 2021, in response to new RocksDB + // and Rust compiler behaviour. + // + // We enabled cancel_all_background_work() due to failures on: + // - Rust 1.57 on Linux + // + // We disabled cancel_all_background_work() due to failures on: + // - Rust 1.64 on Linux + // + // We tried enabling cancel_all_background_work() due to failures on: + // - Rust 1.70 on macOS 12.6.5 on x86_64 + // but it didn't stop the aborts happening (PR #6820). + // + // There weren't any failures with cancel_all_background_work() disabled on: + // - Rust 1.69 or earlier + // - Linux with Rust 1.70 + // And with cancel_all_background_work() enabled or disabled on: + // - macOS 13.2 on aarch64 (M1), native and emulated x86_64, with Rust 1.70 + // + // # Detailed Description + // // We see these kinds of errors: // ``` // pthread lock: Invalid argument @@ -745,13 +914,26 @@ impl DiskDb { // signal: 11, SIGSEGV: invalid memory reference // ``` // + // # Reference + // // The RocksDB wiki says: // > Q: Is it safe to close RocksDB while another thread is issuing read, write or manual compaction requests? // > // > A: No. The users of RocksDB need to make sure all functions have finished before they close RocksDB. // > You can speed up the waiting by calling CancelAllBackgroundWork(). 
// - // https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ + // + // + // > rocksdb::DB instances need to be destroyed before your main function exits. + // > RocksDB instances usually depend on some internal static variables. + // > Users need to make sure rocksdb::DB instances are destroyed before those static variables. + // + // + // + // # TODO + // + // Try re-enabling this code and fixing the underlying concurrency bug. + // //info!(?path, "stopping background database tasks"); //self.db.cancel_all_background_work(true); @@ -760,14 +942,7 @@ impl DiskDb { // But Rust's ownership rules make that difficult, // so we just flush and delete ephemeral data instead. // - // The RocksDB wiki says: - // > rocksdb::DB instances need to be destroyed before your main function exits. - // > RocksDB instances usually depend on some internal static variables. - // > Users need to make sure rocksdb::DB instances are destroyed before those static variables. - // - // https://github.com/facebook/rocksdb/wiki/Known-Issues - // - // But this implementation doesn't seem to cause any issues, + // This implementation doesn't seem to cause any issues, // and the RocksDB Drop implementation handles any cleanup. self.delete_ephemeral(); } diff --git a/zebra-state/src/service/finalized_state/disk_format.rs b/zebra-state/src/service/finalized_state/disk_format.rs index e731ff20d8d..716792f1cb1 100644 --- a/zebra-state/src/service/finalized_state/disk_format.rs +++ b/zebra-state/src/service/finalized_state/disk_format.rs @@ -11,6 +11,7 @@ pub mod block; pub mod chain; pub mod shielded; pub mod transparent; +pub mod upgrade; #[cfg(test)] mod tests; diff --git a/zebra-state/src/service/finalized_state/disk_format/shielded.rs b/zebra-state/src/service/finalized_state/disk_format/shielded.rs index 8836549c332..3b136236542 100644 --- a/zebra-state/src/service/finalized_state/disk_format/shielded.rs +++ b/zebra-state/src/service/finalized_state/disk_format/shielded.rs @@ -44,6 +44,13 @@ impl IntoDisk for sprout::tree::Root { } } +impl FromDisk for sprout::tree::Root { + fn from_bytes(bytes: impl AsRef<[u8]>) -> Self { + let array: [u8; 32] = bytes.as_ref().try_into().unwrap(); + array.into() + } +} + impl IntoDisk for sapling::tree::Root { type Bytes = [u8; 32]; @@ -52,6 +59,13 @@ impl IntoDisk for sapling::tree::Root { } } +impl FromDisk for sapling::tree::Root { + fn from_bytes(bytes: impl AsRef<[u8]>) -> Self { + let array: [u8; 32] = bytes.as_ref().try_into().unwrap(); + array.try_into().expect("finalized data must be valid") + } +} + impl IntoDisk for orchard::tree::Root { type Bytes = [u8; 32]; @@ -60,6 +74,13 @@ impl IntoDisk for orchard::tree::Root { } } +impl FromDisk for orchard::tree::Root { + fn from_bytes(bytes: impl AsRef<[u8]>) -> Self { + let array: [u8; 32] = bytes.as_ref().try_into().unwrap(); + array.try_into().expect("finalized data must be valid") + } +} + // The following implementations for the note commitment trees use `serde` and // `bincode` because currently the inner Merkle tree frontier (from // `incrementalmerkletree`) only supports `serde` for serialization. 
`bincode` diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs b/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs index 8c5edfa03a8..6b7261082ab 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs +++ b/zebra-state/src/service/finalized_state/disk_format/tests/prop.rs @@ -279,6 +279,13 @@ fn serialized_sprout_tree_root_equal() { ); } +#[test] +fn roundtrip_sprout_tree_root() { + let _init_guard = zebra_test::init(); + + proptest!(|(val in any::())| assert_value_properties(val)); +} + // TODO: test note commitment tree round-trip, after implementing proptest::Arbitrary // Sapling @@ -347,6 +354,13 @@ fn serialized_sapling_tree_root_equal() { ); } +#[test] +fn roundtrip_sapling_tree_root() { + let _init_guard = zebra_test::init(); + + proptest!(|(val in any::())| assert_value_properties(val)); +} + // TODO: test note commitment tree round-trip, after implementing proptest::Arbitrary // Orchard @@ -415,6 +429,13 @@ fn serialized_orchard_tree_root_equal() { ); } +#[test] +fn roundtrip_orchard_tree_root() { + let _init_guard = zebra_test::init(); + + proptest!(|(val in any::())| assert_value_properties(val)); +} + // TODO: test note commitment tree round-trip, after implementing proptest::Arbitrary // Chain diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs b/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs index 3c5c9938e15..67b4f2ebb68 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs @@ -103,7 +103,7 @@ fn test_raw_rocksdb_column_families_with_network(network: Network) { .expect("test data deserializes"); state - .commit_finalized_direct(block.into(), "snapshot tests") + .commit_finalized_direct(block.into(), None, "snapshot tests") .expect("test block is valid"); let mut settings = insta::Settings::clone_current(); diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_0.snap index bdf69ca735d..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", + v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_1.snap index 91cde822ee9..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_1.snap @@ -5,10 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_2.snap 
b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_2.snap index 04ee9844634..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@mainnet_2.snap @@ -5,14 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", - v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", - ), - KV( - k: "000002", v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_0.snap index bdf69ca735d..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", + v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_1.snap index 91cde822ee9..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_1.snap @@ -5,10 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_2.snap index 04ee9844634..49244e75105 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_note_commitment_tree_raw_data@testnet_2.snap @@ -5,14 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", - v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", - ), - KV( - k: "000002", v: "0001ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_0.snap index bdf69ca735d..e493c279c38 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: 
"0000", + v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_1.snap index e4c3af6f7eb..e493c279c38 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_1.snap @@ -5,10 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_2.snap index 03feeb64625..e493c279c38 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@mainnet_2.snap @@ -5,14 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", - v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", - ), - KV( - k: "000002", v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_0.snap index bdf69ca735d..e493c279c38 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", + v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_1.snap index e4c3af6f7eb..e493c279c38 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_1.snap @@ -5,10 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_2.snap index 03feeb64625..e493c279c38 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_2.snap +++ 
b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_note_commitment_tree_raw_data@testnet_2.snap @@ -5,14 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", - ), - KV( - k: "000001", - v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", - ), - KV( - k: "000002", v: "0001fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap index bdf69ca735d..6d9892d5d65 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", + v: "0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap index bdf69ca735d..6d9892d5d65 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap @@ -5,6 +5,6 @@ expression: cf_data [ KV( k: "000000", - v: "0000", + v: "0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs new file mode 100644 index 00000000000..cbd8f3b017f --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs @@ -0,0 +1,511 @@ +//! In-place format upgrades for the Zebra state database. + +use std::{ + cmp::Ordering, + sync::{mpsc, Arc}, + thread::{self, JoinHandle}, +}; + +use semver::Version; +use tracing::Span; + +use zebra_chain::{ + block::Height, + diagnostic::task::{CheckForPanics, WaitForPanics}, + parameters::Network, +}; + +use DbFormatChange::*; + +use crate::{ + config::write_database_format_version_to_disk, database_format_version_in_code, + database_format_version_on_disk, service::finalized_state::ZebraDb, Config, +}; + +/// The kind of database format change we're performing. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum DbFormatChange { + /// Marking the format as newly created by `running_version`. + /// + /// Newly created databases have no disk version. + NewlyCreated { running_version: Version }, + + /// Upgrading the format from `older_disk_version` to `newer_running_version`. + /// + /// Until this upgrade is complete, the format is a mixture of both versions. + Upgrade { + older_disk_version: Version, + newer_running_version: Version, + }, + + /// Marking the format as downgraded from `newer_disk_version` to `older_running_version`. + /// + /// Until the state is upgraded to `newer_disk_version` by a Zebra version with that state + /// version (or greater), the format will be a mixture of both versions. + Downgrade { + newer_disk_version: Version, + older_running_version: Version, + }, +} + +/// A handle to a spawned format change thread. 
+/// +/// Cloning this struct creates an additional handle to the same thread. +/// +/// # Concurrency +/// +/// Cancelling the thread on drop has a race condition, because two handles can be dropped at +/// the same time. +/// +/// If cancelling the thread is important, the owner of the handle must call force_cancel(). +#[derive(Clone, Debug)] +pub struct DbFormatChangeThreadHandle { + /// A handle that can wait for the running format change thread to finish. + /// + /// Panics from this thread are propagated into Zebra's state service. + update_task: Option>>, + + /// A channel that tells the running format thread to finish early. + cancel_handle: mpsc::SyncSender, +} + +/// Marker for cancelling a format upgrade. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct CancelFormatChange; + +impl DbFormatChange { + /// Check if loading `disk_version` into `running_version` needs a format change, + /// and if it does, return the required format change. + /// + /// Also logs the kind of change at info level. + /// + /// If `disk_version` is `None`, Zebra is creating a new database. + pub fn new(running_version: Version, disk_version: Option) -> Option { + let Some(disk_version) = disk_version else { + info!( + ?running_version, + "creating new database with the current format" + ); + + return Some(NewlyCreated { running_version }); + }; + + match disk_version.cmp(&running_version) { + Ordering::Less => { + info!( + ?running_version, + ?disk_version, + "trying to open older database format: launching upgrade task" + ); + + Some(Upgrade { + older_disk_version: disk_version, + newer_running_version: running_version, + }) + } + Ordering::Greater => { + info!( + ?running_version, + ?disk_version, + "trying to open newer database format: data should be compatible" + ); + + Some(Downgrade { + newer_disk_version: disk_version, + older_running_version: running_version, + }) + } + Ordering::Equal => { + info!(?running_version, "trying to open current database format"); + + None + } + } + } + + /// Returns true if this change is an upgrade. + #[allow(dead_code)] + pub fn is_upgrade(&self) -> bool { + matches!(self, Upgrade { .. }) + } + + /// Launch a `std::thread` that applies this format change to the database. + /// + /// `initial_tip_height` is the database height when it was opened, and `upgrade_db` is the + /// database instance to upgrade. + pub fn spawn_format_change( + self, + config: Config, + network: Network, + initial_tip_height: Option, + upgrade_db: ZebraDb, + ) -> DbFormatChangeThreadHandle { + // # Correctness + // + // Cancel handles must use try_send() to avoid blocking waiting for the format change + // thread to shut down. + let (cancel_handle, cancel_receiver) = mpsc::sync_channel(1); + + let span = Span::current(); + let update_task = thread::spawn(move || { + span.in_scope(move || { + self.apply_format_change( + config, + network, + initial_tip_height, + upgrade_db, + cancel_receiver, + ); + }) + }); + + let mut handle = DbFormatChangeThreadHandle { + update_task: Some(Arc::new(update_task)), + cancel_handle, + }; + + handle.check_for_panics(); + + handle + } + + /// Apply this format change to the database. + /// + /// Format changes should be launched in an independent `std::thread`, which runs until the + /// upgrade is finished. + /// + /// See `apply_format_upgrade()` for details. 
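Before the implementation, here is a standalone sketch of the cancellation protocol used above (assumed names, nothing Zebra-specific): the channel has capacity 1, cancellers use `try_send()` so they never block, and the worker polls `try_recv()` between units of work, treating a disconnected channel the same as an explicit cancel:

```rust
use std::{sync::mpsc, thread};

struct CancelFormatChange;

fn main() {
    // Capacity 1: a single pending cancel message is always enough.
    let (cancel_tx, cancel_rx) = mpsc::sync_channel::<CancelFormatChange>(1);

    let worker = thread::spawn(move || {
        for step in 0_u64.. {
            // Keep working only while no cancel arrived and a sender still exists.
            if !matches!(cancel_rx.try_recv(), Err(mpsc::TryRecvError::Empty)) {
                println!("cancelled at step {step}");
                return;
            }
            // ... one bounded unit of upgrade work would go here ...
        }
    });

    // try_send() never blocks: if the buffer is already full,
    // a cancel message is pending and nothing more is needed.
    let _ = cancel_tx.try_send(CancelFormatChange);
    worker.join().expect("format change thread panicked");
}
```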
+ fn apply_format_change( + self, + config: Config, + network: Network, + initial_tip_height: Option, + upgrade_db: ZebraDb, + cancel_receiver: mpsc::Receiver, + ) { + match self { + // Handled in the rest of this function. + Upgrade { .. } => self.apply_format_upgrade( + config, + network, + initial_tip_height, + upgrade_db, + cancel_receiver, + ), + + NewlyCreated { .. } => { + Self::mark_as_newly_created(&config, network); + } + Downgrade { .. } => { + // # Correctness + // + // At the start of a format downgrade, the database must be marked as partially or + // fully downgraded. This lets newer Zebra versions know that some blocks with older + // formats have been added to the database. + Self::mark_as_downgraded(&config, network); + + // Older supported versions just assume they can read newer formats, + // because they can't predict all changes a newer Zebra version could make. + // + // The responsibility of staying backwards-compatible is on the newer version. + // We do this on a best-effort basis for versions that are still supported. + } + } + } + + /// Apply any required format updates to the database. + /// Format changes should be launched in an independent `std::thread`. + /// + /// If `cancel_receiver` gets a message, or its sender is dropped, + /// the format change stops running early. + /// + /// See the format upgrade design docs for more details: + /// + // + // New format upgrades must be added to the *end* of this method. + fn apply_format_upgrade( + self, + config: Config, + network: Network, + initial_tip_height: Option, + upgrade_db: ZebraDb, + cancel_receiver: mpsc::Receiver, + ) { + let Upgrade { + newer_running_version, + older_disk_version, + } = self + else { + unreachable!("already checked for Upgrade") + }; + + // # New Upgrades Sometimes Go Here + // + // If the format change is outside RocksDb, put new code above this comment! + let Some(initial_tip_height) = initial_tip_height else { + // If the database is empty, then the RocksDb format doesn't need any changes. + info!( + ?newer_running_version, + ?older_disk_version, + "marking empty database as upgraded" + ); + + Self::mark_as_upgraded_to(&database_format_version_in_code(), &config, network); + + info!( + ?newer_running_version, + ?older_disk_version, + "empty database is fully upgraded" + ); + + return; + }; + + // Example format change. + + // Check if we need to do this upgrade. + let database_format_add_format_change_task = + Version::parse("25.0.2").expect("version constant is valid"); + + if older_disk_version < database_format_add_format_change_task { + let mut upgrade_height = Height(0); + + // Go through every height from genesis to the tip of the old version. + // If the state was downgraded, some heights might already be upgraded. + // (Since the upgraded format is added to the tip, the database can switch between + // lower and higher versions at any block.) + // + // Keep upgrading until the initial database has been upgraded, + // or this task is cancelled by a shutdown. + while upgrade_height <= initial_tip_height + && matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) + { + // TODO: Do one format upgrade step here + // + // This fake step just shows how to access the database. + let _replace_me_ = upgrade_db.tip(); + + upgrade_height = (upgrade_height + 1).expect("task exits before maximum height"); + } + + // At the end of each format upgrade, the database is marked as upgraded to that version. 
+ // Upgrades can be run more than once if Zebra is restarted, so this is just a performance + // optimisation. + info!( + ?initial_tip_height, + ?newer_running_version, + ?older_disk_version, + "marking database as upgraded" + ); + Self::mark_as_upgraded_to(&database_format_add_format_change_task, &config, network); + } + + // End of example format change. + + // # New Upgrades Usually Go Here + // + // New code goes above this comment! + // + // Run the latest format upgrade code after the other upgrades are complete, + // then mark the format as upgraded. The code should check `cancel_receiver` + // every time it runs its inner update loop. + info!( + ?initial_tip_height, + ?newer_running_version, + ?older_disk_version, + "database is fully upgraded" + ); + } + + /// Mark a newly created database with the current format version. + /// + /// This should be called when a newly created database is opened. + /// + /// # Concurrency + /// + /// The version must only be updated while RocksDB is holding the database + /// directory lock. This prevents multiple Zebra instances corrupting the version + /// file. + /// + /// # Panics + /// + /// If the database is not newly created, because it already has a format version on disk. + fn mark_as_newly_created(config: &Config, network: Network) { + let disk_version = database_format_version_on_disk(config, network) + .expect("unable to read database format version file path"); + let running_version = database_format_version_in_code(); + + assert_eq!( + disk_version, None, + "can't overwrite the format version in an existing database:\n\ + disk: {disk_version:?}\n\ + running: {running_version}" + ); + + write_database_format_version_to_disk(&running_version, config, network) + .expect("unable to write database format version file to disk"); + + info!( + ?running_version, + ?disk_version, + "marked database format as newly created" + ); + } + + /// Mark the database as upgraded to `format_upgrade_version`. + /// + /// This should be called when an older database is opened by a newer Zebra version, + /// after each version upgrade is complete. + /// + /// # Concurrency + /// + /// The version must only be updated while RocksDB is holding the database + /// directory lock. This prevents multiple Zebra instances corrupting the version + /// file.
+ /// + /// # Panics + /// + /// If the format should not have been upgraded, because the running version is: + /// - older than the disk version (that's a downgrade) + /// - the same as the disk version (no upgrade needed) + /// + /// If the format should not have been upgraded, because the format upgrade version is: + /// - older or the same as the disk version + /// (multiple upgrades to the same version are not allowed) + /// - greater than the running version (that's a logic bug) + fn mark_as_upgraded_to(format_upgrade_version: &Version, config: &Config, network: Network) { + let disk_version = database_format_version_on_disk(config, network) + .expect("unable to read database format version file") + .expect("tried to upgrade a newly created database"); + let running_version = database_format_version_in_code(); + + assert!( + running_version > disk_version, + "can't upgrade a database that is being opened by an older or the same Zebra version:\n\ + disk: {disk_version}\n\ + upgrade: {format_upgrade_version}\n\ + running: {running_version}" + ); + + assert!( + format_upgrade_version > &disk_version, + "can't upgrade a database that has already been upgraded, or is newer:\n\ + disk: {disk_version}\n\ + upgrade: {format_upgrade_version}\n\ + running: {running_version}" + ); + + assert!( + format_upgrade_version <= &running_version, + "can't upgrade to a newer version than the running Zebra version:\n\ + disk: {disk_version}\n\ + upgrade: {format_upgrade_version}\n\ + running: {running_version}" + ); + + write_database_format_version_to_disk(format_upgrade_version, config, network) + .expect("unable to write database format version file to disk"); + + info!( + ?running_version, + ?format_upgrade_version, + ?disk_version, + "marked database format as upgraded" + ); + } + + /// Mark the database as downgraded to the running database version. + /// This should be called after a newer database is opened by an older Zebra version. + /// + /// # Concurrency + /// + /// The version must only be updated while RocksDB is holding the database + /// directory lock. This prevents multiple Zebra instances corrupting the version + /// file. + /// + /// # Panics + /// + /// If the format should have been upgraded, because the running version is newer. + /// If the state is newly created, because the running version should be the same. + /// + /// Multiple downgrades are allowed, because they all downgrade to the same running version. + fn mark_as_downgraded(config: &Config, network: Network) { + let disk_version = database_format_version_on_disk(config, network) + .expect("unable to read database format version file") + .expect("can't downgrade a newly created database"); + let running_version = database_format_version_in_code(); + + assert!( + disk_version >= running_version, + "can't downgrade a database that is being opened by a newer Zebra version:\n\ + disk: {disk_version}\n\ + running: {running_version}" + ); + + write_database_format_version_to_disk(&running_version, config, network) + .expect("unable to write database format version file to disk"); + + info!( + ?running_version, + ?disk_version, + "marked database format as downgraded" + ); + } +} + +impl DbFormatChangeThreadHandle { + /// Cancel the running format change thread, if this is the last handle. + /// Returns true if it was actually cancelled. + pub fn cancel_if_needed(&self) -> bool { + // # Correctness + // + // Checking the strong count has a race condition, because two handles can be dropped at + // the same time.
+ // + // If cancelling the thread is important, the owner of the handle must call force_cancel(). + if let Some(update_task) = self.update_task.as_ref() { + if Arc::strong_count(update_task) <= 1 { + self.force_cancel(); + return true; + } + } + + false + } + + /// Force the running format change thread to cancel, even if there are other handles. + pub fn force_cancel(&self) { + // There's nothing we can do about errors here. + // If the channel is disconnected, the task has exited. + // If it's full, it's already been cancelled. + let _ = self.cancel_handle.try_send(CancelFormatChange); + } + + /// Check for panics in the code running in the spawned thread. + /// If the thread exited with a panic, resume that panic. + /// + /// This method should be called regularly, so that panics are detected as soon as possible. + pub fn check_for_panics(&mut self) { + self.update_task.check_for_panics(); + } + + /// Wait for the spawned thread to finish. If it exited with a panic, resume that panic. + /// + /// Exits early if the thread has other outstanding handles. + /// + /// This method should be called during shutdown. + pub fn wait_for_panics(&mut self) { + self.update_task.wait_for_panics(); + } +} + +impl Drop for DbFormatChangeThreadHandle { + fn drop(&mut self) { + // Only cancel the format change if the state service is shutting down. + if self.cancel_if_needed() { + self.wait_for_panics(); + } else { + self.check_for_panics(); + } + } +} diff --git a/zebra-state/src/service/finalized_state/tests/prop.rs b/zebra-state/src/service/finalized_state/tests/prop.rs index 5893d4ff2ea..69fa9f40c85 100644 --- a/zebra-state/src/service/finalized_state/tests/prop.rs +++ b/zebra-state/src/service/finalized_state/tests/prop.rs @@ -9,7 +9,7 @@ use crate::{ config::Config, service::{ arbitrary::PreparedChain, - finalized_state::{FinalizedBlock, FinalizedState}, + finalized_state::{CheckpointVerifiedBlock, FinalizedState}, }, tests::FakeChainHelper, }; @@ -28,14 +28,14 @@ fn blocks_with_v5_transactions() -> Result<()> { let mut height = Height(0); // use `count` to minimize test failures, so they are easier to diagnose for block in chain.iter().take(count) { - let finalized = FinalizedBlock::from(block.block.clone()); - let hash = state.commit_finalized_direct( - finalized.into(), + let checkpoint_verified = CheckpointVerifiedBlock::from(block.block.clone()); + let (hash, _) = state.commit_finalized_direct( + checkpoint_verified.into(), + None, "blocks_with_v5_transactions test" - ); + ).unwrap(); prop_assert_eq!(Some(height), state.finalized_tip_height()); - prop_assert_eq!(hash.unwrap(), block.hash); - // TODO: check that the nullifiers were correctly inserted (#2230) + prop_assert_eq!(hash, block.hash); height = Height(height.0 + 1); } }); @@ -84,18 +84,20 @@ fn all_upgrades_and_wrong_commitments_with_fake_activation_heights() -> Result<( h == nu5_height || h == nu5_height_plus1 => { let block = block.block.clone().set_block_commitment([0x42; 32]); - let finalized = FinalizedBlock::from(block); + let checkpoint_verified = CheckpointVerifiedBlock::from(block); state.commit_finalized_direct( - finalized.into(), + checkpoint_verified.into(), + None, "all_upgrades test" ).expect_err("Must fail commitment check"); failure_count += 1; }, _ => {}, } - let finalized = FinalizedBlock::from(block.block.clone()); - let hash = state.commit_finalized_direct( - finalized.into(), + let checkpoint_verified = CheckpointVerifiedBlock::from(block.block.clone()); + let (hash, _) = state.commit_finalized_direct( + 
checkpoint_verified.into(), + None, "all_upgrades test" ).unwrap(); prop_assert_eq!(Some(height), state.finalized_tip_height()); diff --git a/zebra-state/src/service/finalized_state/tests/vectors.rs b/zebra-state/src/service/finalized_state/tests/vectors.rs index 73f787ac67b..2390ed72edd 100644 --- a/zebra-state/src/service/finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/finalized_state/tests/vectors.rs @@ -1,16 +1,150 @@ //! Fixed test vectors for the finalized state. +//! These tests contain snapshots of the note commitment tree serialization format. +//! +//! We don't need to check empty trees, because the database format snapshot tests +//! use empty trees. -use halo2::pasta::{group::ff::PrimeField, pallas}; use hex::FromHex; +use rand::random; + +use halo2::pasta::{group::ff::PrimeField, pallas}; + +use zebra_chain::{ + orchard::{ + tree::legacy::LegacyNoteCommitmentTree as LegacyOrchardNoteCommitmentTree, + tree::NoteCommitmentTree as OrchardNoteCommitmentTree, + }, + sapling::{ + tree::legacy::LegacyNoteCommitmentTree as LegacySaplingNoteCommitmentTree, + tree::NoteCommitmentTree as SaplingNoteCommitmentTree, + }, + sprout::{ + tree::legacy::LegacyNoteCommitmentTree as LegacySproutNoteCommitmentTree, + tree::NoteCommitmentTree as SproutNoteCommitmentTree, + NoteCommitment as SproutNoteCommitment, + }, +}; use crate::service::finalized_state::disk_format::{FromDisk, IntoDisk}; -use zebra_chain::{orchard, sapling}; +/// Check that the sprout tree database serialization format has not changed. +#[test] +fn sprout_note_commitment_tree_serialization() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = SproutNoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/sprout/tests/test_vectors.rs + let hex_commitments = [ + "62fdad9bfbf17c38ea626a9c9b8af8a748e6b4367c8494caf0ca592999e8b6ba", + "68eb35bc5e1ddb80a761718e63a1ecf4d4977ae22cc19fa732b85515b2a4c943", + "836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb", + ]; + + for (idx, cm_hex) in hex_commitments.iter().enumerate() { + let bytes = <[u8; 32]>::from_hex(cm_hex).unwrap(); + + let cm = SproutNoteCommitment::from(bytes); + incremental_tree.append(cm).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. + let expected_serialized_tree_hex = "010200836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb019f5b2b1e4bf7e7318d0a1f417ca6bca36077025b3d11e074b94cd55ce9f3861801c45297124f50dcd3f78eed017afd1e30764cd74cdf0a57751978270fd0721359"; + + sprout_checks(incremental_tree, expected_serialized_tree_hex); +} + +/// Check that the sprout tree database serialization format has not changed for one commitment. 
+#[test] +fn sprout_note_commitment_tree_serialization_one() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = SproutNoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/sprout/tests/test_vectors.rs + let hex_commitments = ["836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb"]; + + for (idx, cm_hex) in hex_commitments.iter().enumerate() { + let bytes = <[u8; 32]>::from_hex(cm_hex).unwrap(); + + let cm = SproutNoteCommitment::from(bytes); + incremental_tree.append(cm).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. + let expected_serialized_tree_hex = "010000836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb000193e5f97ce1d5d94d0c6e1b66a4a262c9ae89e56e28f3f6e4a557b6fb70e173a8"; + + sprout_checks(incremental_tree, expected_serialized_tree_hex); +} + +/// Check that the sprout tree database serialization format has not changed when the number of +/// commitments is a power of two. +/// +/// Some trees have special handling for even numbers of roots, or powers of two, +/// so we also check that case. +#[test] +fn sprout_note_commitment_tree_serialization_pow2() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = SproutNoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/sprout/tests/test_vectors.rs + let hex_commitments = [ + "62fdad9bfbf17c38ea626a9c9b8af8a748e6b4367c8494caf0ca592999e8b6ba", + "68eb35bc5e1ddb80a761718e63a1ecf4d4977ae22cc19fa732b85515b2a4c943", + "836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb", + "92498a8295ea36d593eaee7cb8b55be3a3e37b8185d3807693184054cd574ae4", + ]; + + for (idx, cm_hex) in hex_commitments.iter().enumerate() { + let bytes = <[u8; 32]>::from_hex(cm_hex).unwrap(); + + let cm = SproutNoteCommitment::from(bytes); + incremental_tree.append(cm).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. + let expected_serialized_tree_hex = "010301836045484077cf6390184ea7cd48b460e2d0f22b2293b69633bb152314a692fb92498a8295ea36d593eaee7cb8b55be3a3e37b8185d3807693184054cd574ae4019f5b2b1e4bf7e7318d0a1f417ca6bca36077025b3d11e074b94cd55ce9f3861801b61f588fcba9cea79e94376adae1c49583f716d2f20367141f1369a235b95c98"; + + sprout_checks(incremental_tree, expected_serialized_tree_hex); +} + +/// Check that the sapling tree database serialization format has not changed. 
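Every test ends with `let _ = incremental_tree.root();` because only a root that has already been computed is embedded in the serialized form, and the `cached_root().expect("cached root was serialized")` assertions in the `*_checks` helpers later in this file rely on that. A sketch of the invariant, assuming `cached_root()` returns `None` on a fresh tree until `root()` has run:

```rust
let tree = SproutNoteCommitmentTree::default();

// Hypothetical illustration: nothing is cached until root() is called.
assert!(tree.cached_root().is_none());

let root = tree.root(); // computes the root and caches it
assert_eq!(tree.cached_root(), Some(root));
```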
#[test] fn sapling_note_commitment_tree_serialization() { let _init_guard = zebra_test::init(); - let mut incremental_tree = sapling::tree::NoteCommitmentTree::default(); + let mut incremental_tree = SaplingNoteCommitmentTree::default(); // Some commitments from zebra-chain/src/sapling/tests/test_vectors.rs let hex_commitments = [ @@ -24,7 +158,8 @@ fn sapling_note_commitment_tree_serialization() { let cm_u = jubjub::Fq::from_bytes(&bytes).unwrap(); incremental_tree.append(cm_u).unwrap(); - if idx % 2 == 0 { + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); // Cache the root half of the time to make sure it works in both cases let _ = incremental_tree.root(); } @@ -37,19 +172,95 @@ fn sapling_note_commitment_tree_serialization() { // The purpose of this test is to make sure the serialization format does // not change by accident. let expected_serialized_tree_hex = "0102007c3ea01a6e3a3d90cf59cd789e467044b5cd78eb2c84cc6816f960746d0e036c0162324ff2c329e99193a74d28a585a3c167a93bf41a255135529c913bd9b1e66601ddaa1ab86de5c153993414f34ba97e9674c459dfadde112b89eeeafa0e5a204c"; - let serialized_tree = incremental_tree.as_bytes(); - assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = sapling::tree::NoteCommitmentTree::from_bytes(serialized_tree); + sapling_checks(incremental_tree, expected_serialized_tree_hex); +} - assert_eq!(incremental_tree.root(), deserialized_tree.root()); +/// Check that the sapling tree database serialization format has not changed for one commitment. +#[test] +fn sapling_note_commitment_tree_serialization_one() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = SaplingNoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/sapling/tests/test_vectors.rs + let hex_commitments = ["225747f3b5d5dab4e5a424f81f85c904ff43286e0f3fd07ef0b8c6a627b11458"]; + + for (idx, cm_u_hex) in hex_commitments.iter().enumerate() { + let bytes = <[u8; 32]>::from_hex(cm_u_hex).unwrap(); + + let cm_u = jubjub::Fq::from_bytes(&bytes).unwrap(); + incremental_tree.append(cm_u).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. + let expected_serialized_tree_hex = "010000225747f3b5d5dab4e5a424f81f85c904ff43286e0f3fd07ef0b8c6a627b1145800012c60c7de033d7539d123fb275011edfe08d57431676981d162c816372063bc71"; + + sapling_checks(incremental_tree, expected_serialized_tree_hex); } +/// Check that the sapling tree database serialization format has not changed when the number of +/// commitments is a power of two. +/// +/// Some trees have special handling for even numbers of roots, or powers of two, +/// so we also check that case. 
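The one-commitment, power-of-two, and longer cases repeat the same append-then-check loop for each shielded pool. A table-driven sketch that would collapse the sprout cases into one helper; the helper name is hypothetical, while `sprout_checks` is the shared function defined later in this file:

```rust
// Hypothetical helper: build a tree from hex commitments, cache the final
// root, then run the shared serialization checks.
fn sprout_serialization_case(hex_commitments: &[&str], expected_tree_hex: &str) {
    let mut tree = SproutNoteCommitmentTree::default();

    for cm_hex in hex_commitments {
        let bytes = <[u8; 32]>::from_hex(cm_hex).unwrap();
        tree.append(SproutNoteCommitment::from(bytes)).unwrap();
    }

    // Only a cached root is serialized, so compute it before checking.
    let _ = tree.root();

    sprout_checks(tree, expected_tree_hex);
}
```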
+#[test] +fn sapling_note_commitment_tree_serialization_pow2() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = SaplingNoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/sapling/tests/test_vectors.rs + let hex_commitments = [ + "3a27fed5dbbc475d3880360e38638c882fd9b273b618fc433106896083f77446", + "c7ca8f7df8fd997931d33985d935ee2d696856cc09cc516d419ea6365f163008", + "f0fa37e8063b139d342246142fc48e7c0c50d0a62c97768589e06466742c3702", + "e6d4d7685894d01b32f7e081ab188930be6c2b9f76d6847b7f382e3dddd7c608", + "8cebb73be883466d18d3b0c06990520e80b936440a2c9fd184d92a1f06c4e826", + "22fab8bcdb88154dbf5877ad1e2d7f1b541bc8a5ec1b52266095381339c27c03", + "f43e3aac61e5a753062d4d0508c26ceaf5e4c0c58ba3c956e104b5d2cf67c41c", + "3a3661bc12b72646c94bc6c92796e81953985ee62d80a9ec3645a9a95740ac15", + ]; + + for (idx, cm_u_hex) in hex_commitments.iter().enumerate() { + let bytes = <[u8; 32]>::from_hex(cm_u_hex).unwrap(); + + let cm_u = jubjub::Fq::from_bytes(&bytes).unwrap(); + incremental_tree.append(cm_u).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. + let expected_serialized_tree_hex = "010701f43e3aac61e5a753062d4d0508c26ceaf5e4c0c58ba3c956e104b5d2cf67c41c3a3661bc12b72646c94bc6c92796e81953985ee62d80a9ec3645a9a95740ac15025991131c5c25911b35fcea2a8343e2dfd7a4d5b45493390e0cb184394d91c349002df68503da9247dfde6585cb8c9fa94897cf21735f8fc1b32116ef474de05c01d23765f3d90dfd97817ed6d995bd253d85967f77b9f1eaef6ecbcb0ef6796812"; + + sapling_checks(incremental_tree, expected_serialized_tree_hex); +} + +/// Check that the orchard tree database serialization format has not changed. #[test] fn orchard_note_commitment_tree_serialization() { let _init_guard = zebra_test::init(); - let mut incremental_tree = orchard::tree::NoteCommitmentTree::default(); + let mut incremental_tree = OrchardNoteCommitmentTree::default(); // Some commitments from zebra-chain/src/orchard/tests/tree.rs let commitments = [ @@ -73,7 +284,8 @@ fn orchard_note_commitment_tree_serialization() { for (idx, cm_x_bytes) in commitments.iter().enumerate() { let cm_x = pallas::Base::from_repr(*cm_x_bytes).unwrap(); incremental_tree.append(cm_x).unwrap(); - if idx % 2 == 0 { + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); // Cache the root half of the time to make sure it works in both cases let _ = incremental_tree.root(); } @@ -86,10 +298,237 @@ fn orchard_note_commitment_tree_serialization() { // The purpose of this test is to make sure the serialization format does // not change by accident. let expected_serialized_tree_hex = "010200ee9488053a30c596b43014105d3477e6f578c89240d1d1ee1743b77bb6adc40a01a34b69a4e4d9ccf954d46e5da1004d361a5497f511aeb4d481d23c0be177813301a0be6dab19bc2c65d8299258c16e14d48ec4d4959568c6412aa85763c222a702"; + + orchard_checks(incremental_tree, expected_serialized_tree_hex); +} + +/// Check that the orchard tree database serialization format has not changed for one commitment. 
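The orchard tests convert each 32-byte vector with `pallas::Base::from_repr(..).unwrap()`. `from_repr` returns a `CtOption`, which is empty when the bytes are not a canonical little-endian field element, so the unwrap doubles as a validity check on the vectors. A self-contained illustration:

```rust
use halo2::pasta::{group::ff::PrimeField, pallas};

fn main() {
    // Zero is a canonical encoding, so from_repr succeeds.
    assert!(bool::from(pallas::Base::from_repr([0u8; 32]).is_some()));

    // 2^256 - 1 is larger than the Pallas base field modulus,
    // so its little-endian encoding is rejected.
    assert!(bool::from(pallas::Base::from_repr([0xff_u8; 32]).is_none()));
}
```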
+#[test] +fn orchard_note_commitment_tree_serialization_one() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = OrchardNoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/orchard/tests/tree.rs + let commitments = [[ + 0x68, 0x13, 0x5c, 0xf4, 0x99, 0x33, 0x22, 0x90, 0x99, 0xa4, 0x4e, 0xc9, 0x9a, 0x75, 0xe1, + 0xe1, 0xcb, 0x46, 0x40, 0xf9, 0xb5, 0xbd, 0xec, 0x6b, 0x32, 0x23, 0x85, 0x6f, 0xea, 0x16, + 0x39, 0x0a, + ]]; + + for (idx, cm_x_bytes) in commitments.iter().enumerate() { + let cm_x = pallas::Base::from_repr(*cm_x_bytes).unwrap(); + incremental_tree.append(cm_x).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. + let expected_serialized_tree_hex = "01000068135cf49933229099a44ec99a75e1e1cb4640f9b5bdec6b3223856fea16390a000178afd4da59c541e9c2f317f9aff654f1fb38d14dc99431cbbfa93601c7068117"; + + orchard_checks(incremental_tree, expected_serialized_tree_hex); +} + +/// Check that the orchard tree database serialization format has not changed when the number of +/// commitments is a power of two. +/// +/// Some trees have special handling for even numbers of roots, or powers of two, +/// so we also check that case. +#[test] +fn orchard_note_commitment_tree_serialization_pow2() { + let _init_guard = zebra_test::init(); + + let mut incremental_tree = OrchardNoteCommitmentTree::default(); + + // Some commitments from zebra-chain/src/orchard/tests/tree.rs + let commitments = [ + [ + 0x78, 0x31, 0x50, 0x08, 0xfb, 0x29, 0x98, 0xb4, 0x30, 0xa5, 0x73, 0x1d, 0x67, 0x26, + 0x20, 0x7d, 0xc0, 0xf0, 0xec, 0x81, 0xea, 0x64, 0xaf, 0x5c, 0xf6, 0x12, 0x95, 0x69, + 0x01, 0xe7, 0x2f, 0x0e, + ], + [ + 0xee, 0x94, 0x88, 0x05, 0x3a, 0x30, 0xc5, 0x96, 0xb4, 0x30, 0x14, 0x10, 0x5d, 0x34, + 0x77, 0xe6, 0xf5, 0x78, 0xc8, 0x92, 0x40, 0xd1, 0xd1, 0xee, 0x17, 0x43, 0xb7, 0x7b, + 0xb6, 0xad, 0xc4, 0x0a, + ], + ]; + + for (idx, cm_x_bytes) in commitments.iter().enumerate() { + let cm_x = pallas::Base::from_repr(*cm_x_bytes).unwrap(); + incremental_tree.append(cm_x).unwrap(); + if random() { + info!(?idx, "randomly caching root for note commitment tree index"); + // Cache the root half of the time to make sure it works in both cases + let _ = incremental_tree.root(); + } + } + + // Make sure the last root is cached + let _ = incremental_tree.root(); + + // This test vector was generated by the code itself. + // The purpose of this test is to make sure the serialization format does + // not change by accident. 
+ let expected_serialized_tree_hex = "01010178315008fb2998b430a5731d6726207dc0f0ec81ea64af5cf612956901e72f0eee9488053a30c596b43014105d3477e6f578c89240d1d1ee1743b77bb6adc40a0001d3d525931005e45f5a29bc82524e871e5ee1b6d77839deb741a6e50cd99fdf1a"; + + orchard_checks(incremental_tree, expected_serialized_tree_hex); +} + +fn sprout_checks(incremental_tree: SproutNoteCommitmentTree, expected_serialized_tree_hex: &str) { let serialized_tree = incremental_tree.as_bytes(); + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); - let deserialized_tree = orchard::tree::NoteCommitmentTree::from_bytes(serialized_tree); + let deserialized_tree = SproutNoteCommitmentTree::from_bytes(&serialized_tree); + + // Get a legacy deserialized tree from the deserialized tree. + let deserialized_legacy_tree = LegacySproutNoteCommitmentTree::from(deserialized_tree.clone()); + + // Get a deserialized tree from a legacy deserialized tree. + let deserialized_legacy_tree_as_new = deserialized_legacy_tree.into(); + + // Check frontiers are the same. + incremental_tree.assert_frontier_eq(&deserialized_tree); + incremental_tree.assert_frontier_eq(&deserialized_legacy_tree_as_new); + // Check cached roots are the same. assert_eq!(incremental_tree.root(), deserialized_tree.root()); + assert_eq!( + incremental_tree.root(), + deserialized_legacy_tree_as_new.root() + ); + + // Check recalculated roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree.recalculate_root() + ); + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_legacy_tree_as_new.recalculate_root() + ); + + // Check reclaculated and cached roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree + .cached_root() + .expect("cached root was serialized") + ); + + // Double-check that the internal format is the same by re-serializing the tree. + let re_serialized_tree = deserialized_tree.as_bytes(); + let re_serialized_legacy_tree = deserialized_legacy_tree_as_new.as_bytes(); + + assert_eq!(serialized_tree, re_serialized_tree); + assert_eq!(re_serialized_legacy_tree, re_serialized_tree); +} + +fn sapling_checks(incremental_tree: SaplingNoteCommitmentTree, expected_serialized_tree_hex: &str) { + let serialized_tree = incremental_tree.as_bytes(); + + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); + + let deserialized_tree = SaplingNoteCommitmentTree::from_bytes(&serialized_tree); + + // Get a legacy deserialized tree from the deserialized tree. + let deserialized_legacy_tree = LegacySaplingNoteCommitmentTree::from(deserialized_tree.clone()); + + // Get a deserialized tree from a legacy deserialized tree. + let deserialized_legacy_tree_as_new = deserialized_legacy_tree.into(); + + // Check frontiers are the same. + incremental_tree.assert_frontier_eq(&deserialized_tree); + incremental_tree.assert_frontier_eq(&deserialized_legacy_tree_as_new); + + // Check cached roots are the same. 
+ assert_eq!(incremental_tree.root(), deserialized_tree.root()); + assert_eq!( + incremental_tree.root(), + deserialized_legacy_tree_as_new.root() + ); + + // Check recalculated roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree.recalculate_root() + ); + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_legacy_tree_as_new.recalculate_root() + ); + + // Check reclaculated and cached roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree + .cached_root() + .expect("cached root was serialized") + ); + + // Double-check that the internal format is the same by re-serializing the tree. + let re_serialized_tree = deserialized_tree.as_bytes(); + let re_serialized_legacy_tree = deserialized_legacy_tree_as_new.as_bytes(); + + assert_eq!(serialized_tree, re_serialized_tree); + assert_eq!(re_serialized_legacy_tree, re_serialized_tree); +} + +fn orchard_checks(incremental_tree: OrchardNoteCommitmentTree, expected_serialized_tree_hex: &str) { + let serialized_tree = incremental_tree.as_bytes(); + + assert_eq!(hex::encode(&serialized_tree), expected_serialized_tree_hex); + + let deserialized_tree = OrchardNoteCommitmentTree::from_bytes(&serialized_tree); + + // Get a legacy deserialized tree from the deserialized tree. + let deserialized_legacy_tree = LegacyOrchardNoteCommitmentTree::from(deserialized_tree.clone()); + + // Get a deserialized tree from a legacy deserialized tree. + let deserialized_legacy_tree_as_new = deserialized_legacy_tree.into(); + + // Check frontiers are the same. + incremental_tree.assert_frontier_eq(&deserialized_tree); + incremental_tree.assert_frontier_eq(&deserialized_legacy_tree_as_new); + + // Check cached roots are the same. + assert_eq!(incremental_tree.root(), deserialized_tree.root()); + assert_eq!( + incremental_tree.root(), + deserialized_legacy_tree_as_new.root() + ); + + // Check recalculated roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree.recalculate_root() + ); + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_legacy_tree_as_new.recalculate_root() + ); + + // Check reclaculated and cached roots are the same + assert_eq!( + incremental_tree.recalculate_root(), + deserialized_tree + .cached_root() + .expect("cached root was serialized") + ); + + // Double-check that the internal format is the same by re-serializing the tree. + let re_serialized_tree = deserialized_tree.as_bytes(); + let re_serialized_legacy_tree = deserialized_legacy_tree_as_new.as_bytes(); + + assert_eq!(serialized_tree, re_serialized_tree); + assert_eq!(re_serialized_legacy_tree, re_serialized_tree); } diff --git a/zebra-state/src/service/finalized_state/zebra_db.rs b/zebra-state/src/service/finalized_state/zebra_db.rs index 8b6e261050a..63decfe10db 100644 --- a/zebra-state/src/service/finalized_state/zebra_db.rs +++ b/zebra-state/src/service/finalized_state/zebra_db.rs @@ -14,7 +14,14 @@ use std::path::Path; use zebra_chain::parameters::Network; use crate::{ - service::finalized_state::{disk_db::DiskDb, disk_format::block::MAX_ON_DISK_HEIGHT}, + config::{database_format_version_in_code, database_format_version_on_disk}, + service::finalized_state::{ + disk_db::DiskDb, + disk_format::{ + block::MAX_ON_DISK_HEIGHT, + upgrade::{DbFormatChange, DbFormatChangeThreadHandle}, + }, + }, Config, }; @@ -32,12 +39,20 @@ pub mod arbitrary; /// `rocksdb` allows concurrent writes through a shared reference, /// so database instances are cloneable. 
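The `format_change_handle` field introduced below follows the same pattern as `DbFormatChangeThreadHandle` earlier in this diff: the running task is shared behind an `Arc`, and cancellation is requested over a bounded channel so the owner never blocks. A standalone sketch of that pattern, with all names hypothetical:

```rust
use std::{
    sync::{mpsc, Arc},
    thread,
};

/// Marker message telling the background task to stop.
struct Cancel;

struct TaskHandle {
    task: Option<Arc<thread::JoinHandle<()>>>,
    cancel_tx: mpsc::SyncSender<Cancel>,
}

impl TaskHandle {
    /// Cancel the task, but only if this is the last handle to it.
    fn cancel_if_needed(&self) -> bool {
        if let Some(task) = self.task.as_ref() {
            if Arc::strong_count(task) <= 1 {
                self.force_cancel();
                return true;
            }
        }
        false
    }

    /// Request cancellation unconditionally. A full channel means a cancel is
    /// already pending, and a disconnected one means the task has exited, so
    /// both errors can be ignored.
    fn force_cancel(&self) {
        let _ = self.cancel_tx.try_send(Cancel);
    }
}
```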
/// `rocksdb` allows concurrent writes through a shared reference, /// so database instances are cloneable. When the final clone is dropped, /// the database is closed. -#[derive(Clone, Debug, Eq, PartialEq)] +#[derive(Clone, Debug)] pub struct ZebraDb { // Owned State // // Everything contained in this state must be shared by all clones, or read-only. // + /// A handle to a running format change task, which cancels the task when dropped. + /// + /// # Concurrency + /// + /// This field should be dropped before the database field, so the format upgrade task is + /// cancelled before the database is dropped. This helps avoid some kinds of deadlocks. + format_change_handle: Option<DbFormatChangeThreadHandle>, + /// The inner low-level database wrapper for the RocksDB database. db: DiskDb, } @@ -46,12 +61,49 @@ impl ZebraDb { /// Opens or creates the database at `config.path` for `network`, /// and returns a shared high-level typed database wrapper. pub fn new(config: &Config, network: Network) -> ZebraDb { - let db = ZebraDb { + let running_version = database_format_version_in_code(); + let disk_version = database_format_version_on_disk(config, network) + .expect("unable to read database format version file"); + + // Log any format changes before opening the database, in case opening fails. + let format_change = DbFormatChange::new(running_version, disk_version); + + // Open the database and do initial checks. + let mut db = ZebraDb { + format_change_handle: None, db: DiskDb::new(config, network), }; db.check_max_on_disk_tip_height(); + // We have to get this height before we spawn the upgrade task, because threads can take + // a while to start, and new blocks can be committed as soon as we return from this method. + let initial_tip_height = db.finalized_tip_height(); + + // Start any required format changes. + // + // TODO: should debug_stop_at_height wait for these upgrades, or not? + if let Some(format_change) = format_change { + // Launch the format change and install its handle in the database. + // + // `upgrade_db` is a special clone of the database, which can't be used to shut down + // the upgrade task. (Because the task hasn't been launched yet, + // `db.format_change_handle` is always None.) + // + // It can be a FinalizedState if needed, or the FinalizedState methods needed for + // upgrades can be moved to ZebraDb. + let upgrade_db = db.clone(); + + let format_change_handle = format_change.spawn_format_change( + config.clone(), + network, + initial_tip_height, + upgrade_db, + ); + + db.format_change_handle = Some(format_change_handle); + } + db } @@ -60,6 +112,19 @@ impl ZebraDb { self.db.path() } + /// Check for panics in code running in spawned threads. + /// If a thread exited with a panic, resume that panic. + /// + /// This method should be called regularly, so that panics are detected as soon as possible. + pub fn check_for_panics(&mut self) { + if let Some(format_change_handle) = self.format_change_handle.as_mut() { + format_change_handle.check_for_panics(); + } + + // This check doesn't panic, but we want to check it regularly anyway. + self.check_max_on_disk_tip_height(); + } + /// Shut down the database, cleaning up background tasks and ephemeral data. /// /// If `force` is true, clean up regardless of any shared references. @@ -68,7 +133,19 @@ /// /// See [`DiskDb::shutdown`] for details. pub fn shutdown(&mut self, force: bool) { - self.check_max_on_disk_tip_height(); + // # Concurrency + // + // The format upgrade task should be cancelled before the database is flushed or shut down. + // This helps avoid some kinds of deadlocks. + // + // See also the correctness note in `DiskDb::shutdown()`. + if force || self.db.shared_database_owners() <= 1 { + if let Some(format_change_handle) = self.format_change_handle.as_mut() { + format_change_handle.force_cancel(); + } + } + + self.check_for_panics(); self.db.shutdown(force); } @@ -93,3 +170,9 @@ impl ZebraDb { } } } + +impl Drop for ZebraDb { + fn drop(&mut self) { + self.shutdown(false); + } +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index b6affaf1610..78cda842bd4 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -19,18 +19,19 @@ use itertools::Itertools; use zebra_chain::{ amount::NonNegative, block::{self, Block, Height}, - history_tree::HistoryTree, orchard, parallel::tree::NoteCommitmentTrees, parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH}, sapling, serialization::TrustedPreallocate, + sprout, transaction::{self, Transaction}, transparent, value_balance::ValueBalance, }; use crate::{ + request::SemanticallyVerifiedBlockWithTrees, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, disk_format::{ @@ -38,9 +39,8 @@ use crate::{ transparent::{AddressBalanceLocation, OutputLocation}, }, zebra_db::{metrics::block_precommit_metrics, ZebraDb}, - FinalizedBlock, }, - BoxError, HashOrHeight, + BoxError, HashOrHeight, SemanticallyVerifiedBlock, }; #[cfg(test)] @@ -149,34 +149,28 @@ impl ZebraDb { })) } - /// Returns the Sapling - /// [`NoteCommitmentTree`](sapling::tree::NoteCommitmentTree) specified by a - /// hash or height, if it exists in the finalized `db`. + /// Returns the Sapling [`note commitment tree`](sapling::tree::NoteCommitmentTree) specified by + /// a hash or height, if it exists in the finalized state. #[allow(clippy::unwrap_in_result)] - pub fn sapling_tree( + pub fn sapling_tree_by_hash_or_height( &self, hash_or_height: HashOrHeight, ) -> Option<Arc<sapling::tree::NoteCommitmentTree>> { let height = hash_or_height.height_or_else(|hash| self.height(hash))?; - let sapling_tree_handle = self.db.cf_handle("sapling_note_commitment_tree").unwrap(); - - self.db.zs_get(&sapling_tree_handle, &height) + self.sapling_tree_by_height(&height) }
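Both renamed lookups resolve a block hash to its height first, then delegate to the height-indexed method, so either form of the query returns the same tree. A usage sketch, assuming `HashOrHeight` converts from both heights and hashes via `.into()` (as other call sites in this diff suggest) and a `hash(height)` accessor on the database:

```rust
// Hypothetical usage: the same block, queried both ways.
let height = Height(100);
let hash = db.hash(height).expect("height is below the finalized tip");

let tree_by_height = db.sapling_tree_by_hash_or_height(height.into());
let tree_by_hash = db.sapling_tree_by_hash_or_height(hash.into());

assert_eq!(tree_by_height, tree_by_hash);
```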
- /// Returns the Orchard - /// [`NoteCommitmentTree`](orchard::tree::NoteCommitmentTree) specified by a - /// hash or height, if it exists in the finalized `db`. + /// Returns the Orchard [`note commitment tree`](orchard::tree::NoteCommitmentTree) specified by + /// a hash or height, if it exists in the finalized state. #[allow(clippy::unwrap_in_result)] - pub fn orchard_tree( + pub fn orchard_tree_by_hash_or_height( &self, hash_or_height: HashOrHeight, ) -> Option<Arc<orchard::tree::NoteCommitmentTree>> { let height = hash_or_height.height_or_else(|hash| self.height(hash))?; - let orchard_tree_handle = self.db.cf_handle("orchard_note_commitment_tree").unwrap(); - - self.db.zs_get(&orchard_tree_handle, &height) + self.orchard_tree_by_height(&height) } // Read tip block methods @@ -282,15 +276,13 @@ impl ZebraDb { /// - Propagates any errors from updating history and note commitment trees pub(in super::super) fn write_block( &mut self, - finalized: FinalizedBlock, - history_tree: Arc<HistoryTree>, - note_commitment_trees: NoteCommitmentTrees, + finalized: SemanticallyVerifiedBlockWithTrees, + prev_note_commitment_trees: Option<NoteCommitmentTrees>, network: Network, source: &str, ) -> Result<block::Hash, BoxError> { - let finalized_hash = finalized.hash; - let tx_hash_indexes: HashMap<transaction::Hash, usize> = finalized + .verified .transaction_hashes .iter() .enumerate() .map(|(index, hash)| (*hash, index)) .collect(); @@ -303,12 +295,13 @@ // simplify the spent_utxos location lookup code, // and remove the extra new_outputs_by_out_loc argument let new_outputs_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo> = finalized + .verified .new_outputs .iter() - .map(|(outpoint, utxo)| { + .map(|(outpoint, ordered_utxo)| { ( - lookup_out_loc(finalized.height, outpoint, &tx_hash_indexes), - utxo.clone(), + lookup_out_loc(finalized.verified.height, outpoint, &tx_hash_indexes), + ordered_utxo.utxo.clone(), ) }) .collect(); @@ -316,6 +309,7 @@ // Get a list of the spent UTXOs, before we delete any from the database let spent_utxos: Vec<(transparent::OutPoint, OutputLocation, transparent::Utxo)> = finalized + .verified .block .transactions .iter() @@ -327,11 +321,17 @@ // Some utxos are spent in the same block, so they will be in // `tx_hash_indexes` and `new_outputs` self.output_location(&outpoint).unwrap_or_else(|| { - lookup_out_loc(finalized.height, &outpoint, &tx_hash_indexes) + lookup_out_loc(finalized.verified.height, &outpoint, &tx_hash_indexes) }), self.utxo(&outpoint) .map(|ordered_utxo| ordered_utxo.utxo) - .or_else(|| finalized.new_outputs.get(&outpoint).cloned()) + .or_else(|| { + finalized + .verified + .new_outputs + .get(&outpoint) + .map(|ordered_utxo| ordered_utxo.utxo.clone()) + }) .expect("already checked UTXO was in state or block"), ) }) @@ -350,7 +350,13 @@ // Get the transparent addresses with changed balances/UTXOs let changed_addresses: HashSet<transparent::Address> = spent_utxos_by_out_loc .values() - .chain(finalized.new_outputs.values()) + .chain( + finalized + .verified + .new_outputs + .values() + .map(|ordered_utxo| &ordered_utxo.utxo), + ) .filter_map(|utxo| utxo.output.address(network)) .unique() .collect(); @@ -366,22 +372,21 @@ // In case of errors, propagate and do not write the batch. batch.prepare_block_batch( - &self.db, - finalized, + self, + &finalized, new_outputs_by_out_loc, spent_utxos_by_outpoint, spent_utxos_by_out_loc, address_balances, - history_tree, - note_commitment_trees, self.finalized_value_pool(), + prev_note_commitment_trees, )?; self.db.write(batch)?; tracing::trace!(?source, "committed block from"); - Ok(finalized_hash) + Ok(finalized.verified.hash) } } @@ -419,26 +424,19 @@ #[allow(clippy::too_many_arguments)] pub fn prepare_block_batch( &mut self, - db: &DiskDb, - finalized: FinalizedBlock, + zebra_db: &ZebraDb, + finalized: &SemanticallyVerifiedBlockWithTrees, new_outputs_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>, spent_utxos_by_outpoint: HashMap<transparent::OutPoint, transparent::Utxo>, spent_utxos_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>, address_balances: HashMap<transparent::Address, AddressBalanceLocation>, - history_tree: Arc<HistoryTree>, - note_commitment_trees: NoteCommitmentTrees, value_pool: ValueBalance<NonNegative>, + prev_note_commitment_trees: Option<NoteCommitmentTrees>, ) -> Result<(), BoxError> { - let FinalizedBlock { block, hash, height, .. } = &finalized; - + let db = &zebra_db.db; // Commit block and transaction data. // (Transaction indexes, note commitments, and UTXOs are committed later.) - self.prepare_block_header_and_transaction_data_batch(db, &finalized)?; + self.prepare_block_header_and_transaction_data_batch(db, &finalized.verified)?; // # Consensus // @@ -449,28 +447,37 @@ // // By returning early, Zebra commits the genesis block and transaction data, // but it ignores the genesis UTXO and value pool updates. - if self.prepare_genesis_batch(db, &finalized) { + if self.prepare_genesis_batch(db, finalized) { return Ok(()); } // Commit transaction indexes self.prepare_transparent_transaction_batch( db, - &finalized, + &finalized.verified, &new_outputs_by_out_loc, &spent_utxos_by_outpoint, &spent_utxos_by_out_loc, address_balances, )?; - self.prepare_shielded_transaction_batch(db, &finalized)?; + self.prepare_shielded_transaction_batch(db, &finalized.verified)?; - self.prepare_note_commitment_batch(db, &finalized, note_commitment_trees, history_tree)?; + self.prepare_trees_batch(zebra_db, finalized, prev_note_commitment_trees)?; // Commit UTXOs and value pools - self.prepare_chain_value_pools_batch(db, &finalized, spent_utxos_by_outpoint, value_pool)?; + self.prepare_chain_value_pools_batch( + db, + &finalized.verified, + spent_utxos_by_outpoint, + value_pool, + )?; // The block has passed contextual validation, so update the metrics - block_precommit_metrics(block, *hash, *height); + block_precommit_metrics( + &finalized.verified.block, + finalized.verified.hash, + finalized.verified.height, + ); Ok(()) } @@ -485,7 +492,7 @@ pub fn prepare_block_header_and_transaction_data_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, + finalized: &SemanticallyVerifiedBlock, ) -> Result<(), BoxError> { // Blocks let block_header_by_height = db.cf_handle("block_header_by_height").unwrap(); @@ -497,7 +504,7 @@ let hash_by_tx_loc = db.cf_handle("hash_by_tx_loc").unwrap(); let tx_loc_by_hash = db.cf_handle("tx_loc_by_hash").unwrap(); - let FinalizedBlock { + let SemanticallyVerifiedBlock { block, hash, height, @@ -531,25 +538,71 @@ Ok(()) } - /// If `finalized.block` is a genesis block, - /// prepare a database batch that finishes initializing the database, - /// and return `true` (without actually writing anything).
+ /// If `finalized.block` is a genesis block, prepares a database batch that finishes + /// initializing the database, and returns `true` without actually writing anything. /// - /// Since the genesis block's transactions are skipped, - /// the returned genesis batch should be written to the database immediately. + /// Since the genesis block's transactions are skipped, the returned genesis batch should be + /// written to the database immediately. /// /// If `finalized.block` is not a genesis block, does nothing. /// - /// This method never returns an error. - pub fn prepare_genesis_batch(&mut self, db: &DiskDb, finalized: &FinalizedBlock) -> bool { - let FinalizedBlock { block, .. } = finalized; - - if block.header.previous_block_hash == GENESIS_PREVIOUS_BLOCK_HASH { - self.prepare_genesis_note_commitment_tree_batch(db, finalized); - - return true; + /// # Panics + /// + /// If `finalized.block` is a genesis block, and a note commitment tree in `finalized` doesn't + /// match its corresponding empty tree. + pub fn prepare_genesis_batch( + &mut self, + db: &DiskDb, + finalized: &SemanticallyVerifiedBlockWithTrees, + ) -> bool { + if finalized.verified.block.header.previous_block_hash == GENESIS_PREVIOUS_BLOCK_HASH { + assert_eq!( + *finalized.treestate.note_commitment_trees.sprout, + sprout::tree::NoteCommitmentTree::default(), + "The Sprout tree in the finalized block must match the empty Sprout tree." + ); + assert_eq!( + *finalized.treestate.note_commitment_trees.sapling, + sapling::tree::NoteCommitmentTree::default(), + "The Sapling tree in the finalized block must match the empty Sapling tree." + ); + assert_eq!( + *finalized.treestate.note_commitment_trees.orchard, + orchard::tree::NoteCommitmentTree::default(), + "The Orchard tree in the finalized block must match the empty Orchard tree." + ); + + // We want to store the trees of the genesis block together with their roots, and since + // the trees cache the roots after their computation, we trigger the computation. + // + // At the time of writing this comment, the roots are precomputed before this function + // is called, so the roots should already be cached. + finalized.treestate.note_commitment_trees.sprout.root(); + finalized.treestate.note_commitment_trees.sapling.root(); + finalized.treestate.note_commitment_trees.orchard.root(); + + // Insert the empty note commitment trees. Note that these can't be used too early + // (e.g. the Orchard tree before Nu5 activates) since the block validation will make + // sure only appropriate transactions are allowed in a block. 
+ self.zs_insert( + &db.cf_handle("sprout_note_commitment_tree").unwrap(), + finalized.verified.height, + finalized.treestate.note_commitment_trees.sprout.clone(), + ); + self.zs_insert( + &db.cf_handle("sapling_note_commitment_tree").unwrap(), + finalized.verified.height, + finalized.treestate.note_commitment_trees.sapling.clone(), + ); + self.zs_insert( + &db.cf_handle("orchard_note_commitment_tree").unwrap(), + finalized.verified.height, + finalized.treestate.note_commitment_trees.orchard.clone(), + ); + + true + } else { + false } - - false } } diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs index f40b40156b8..2754cd69c3a 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs @@ -197,7 +197,7 @@ fn test_block_and_transaction_data_with_network(network: Network) { .expect("test data deserializes"); state - .commit_finalized_direct(block.into(), "snapshot tests") + .commit_finalized_direct(block.into(), None, "snapshot tests") .expect("test block is valid"); let mut settings = insta::Settings::clone_current(); @@ -217,11 +217,13 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { if let Some((max_height, tip_block_hash)) = tip { // Check that the database returns empty note commitment trees for the // genesis block. + // + // We only store the sprout tree for the tip by height, so we can't check sprout here. let sapling_tree = state - .sapling_note_commitment_tree_by_height(&block::Height::MIN) + .sapling_tree_by_height(&block::Height::MIN) .expect("the genesis block in the database has a Sapling tree"); let orchard_tree = state - .orchard_note_commitment_tree_by_height(&block::Height::MIN) + .orchard_tree_by_height(&block::Height::MIN) .expect("the genesis block in the database has an Orchard tree"); assert_eq!(*sapling_tree, sapling::tree::NoteCommitmentTree::default()); @@ -241,11 +243,13 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { // Shielded + let stored_sprout_trees = state.sprout_trees_full_map(); let mut stored_sapling_trees = Vec::new(); let mut stored_orchard_trees = Vec::new(); - let sapling_tree_at_tip = state.sapling_note_commitment_tree(); - let orchard_tree_at_tip = state.orchard_note_commitment_tree(); + let sprout_tree_at_tip = state.sprout_tree(); + let sapling_tree_at_tip = state.sapling_tree(); + let orchard_tree_at_tip = state.orchard_tree(); // Test the history tree. // @@ -253,7 +257,6 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { // test the rest of the chain data (value balance). let history_tree_at_tip = state.history_tree(); - // TODO: split out block snapshots into their own function (#3151) for query_height in 0..=max_height.0 { let query_height = Height(query_height); @@ -269,14 +272,16 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { .block(query_height.into()) .expect("heights up to tip have blocks"); - // Check the sapling and orchard note commitment trees. + // Check the shielded note commitment trees. + // + // We only store the sprout tree for the tip by height, so we can't check sprout here. 
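Historical sprout trees are no longer reachable by height, so the snapshot test above falls back to `sprout_trees_full_map()`, which is keyed by anchor. A sketch of the anchor-based lookup this layout enables, mirroring the `sprout_tree_by_anchor` method that appears later in this diff:

```rust
// Hypothetical check: a sprout tree stored under an anchor can be fetched
// back by that anchor, and its root matches the key.
let anchor = sprout_tree_at_tip.root();

if let Some(stored_tree) = state.sprout_tree_by_anchor(&anchor) {
    assert_eq!(stored_tree.root(), anchor);
}
```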
// - // TODO: test the rest of the shielded data (anchors, nullifiers, sprout) + // TODO: test the rest of the shielded data (anchors, nullifiers) let sapling_tree_by_height = state - .sapling_note_commitment_tree_by_height(&query_height) + .sapling_tree_by_height(&query_height) .expect("heights up to tip have Sapling trees"); let orchard_tree_by_height = state - .orchard_note_commitment_tree_by_height(&query_height) + .orchard_tree_by_height(&query_height) .expect("heights up to tip have Orchard trees"); // We don't need to snapshot the heights, @@ -298,6 +303,18 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { if query_height == max_height { assert_eq!(stored_block_hash, tip_block_hash); + // We only store the sprout tree for the tip by height, + // so the sprout check is less strict. + // We enforce the tip tree order by snapshotting it as well. + if let Some(stored_tree) = stored_sprout_trees.get(&sprout_tree_at_tip.root()) { + assert_eq!( + &sprout_tree_at_tip, stored_tree, + "unexpected missing sprout tip tree:\n\ + all trees: {stored_sprout_trees:?}" + ); + } else { + assert_eq!(sprout_tree_at_tip, Default::default()); + } assert_eq!(sapling_tree_at_tip, sapling_tree_by_height); assert_eq!(orchard_tree_at_tip, orchard_tree_by_height); @@ -428,6 +445,14 @@ fn snapshot_block_and_transaction_data(state: &FinalizedState) { // These snapshots will change if the trees do not have cached roots. // But we expect them to always have cached roots, // because those roots are used to populate the anchor column families. + insta::assert_ron_snapshot!("sprout_tree_at_tip", sprout_tree_at_tip); + insta::assert_ron_snapshot!( + "sprout_trees", + stored_sprout_trees, + { + "." => insta::sorted_redaction() + } + ); insta::assert_ron_snapshot!("sapling_trees", stored_sapling_trees); insta::assert_ron_snapshot!("orchard_trees", stored_orchard_trees); diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_1.snap index 407ca2ec20a..949d551263c 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_1.snap @@ -7,7 +7,9 @@ expression: stored_orchard_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Base( + bytes: (174, 41, 53, 241, 223, 216, 162, 74, 237, 124, 112, 223, 125, 227, 166, 104, 235, 122, 73, 177, 49, 152, 128, 221, 226, 187, 217, 3, 26, 229, 216, 47), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_2.snap index 42bf130f51c..10b343c74fa 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@mainnet_2.snap @@ -7,7 +7,9 @@ expression: stored_orchard_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Base( + bytes: (174, 41, 53, 241, 223, 216, 162, 74, 237, 124, 112, 223, 125, 227, 166, 104, 235, 122, 73, 177, 49, 152, 128, 221, 226, 187, 217, 3, 26, 229, 216, 47), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git 
a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_1.snap index 407ca2ec20a..949d551263c 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_1.snap @@ -7,7 +7,9 @@ expression: stored_orchard_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Base( + bytes: (174, 41, 53, 241, 223, 216, 162, 74, 237, 124, 112, 223, 125, 227, 166, 104, 235, 122, 73, 177, 49, 152, 128, 221, 226, 187, 217, 3, 26, 229, 216, 47), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_2.snap index 42bf130f51c..10b343c74fa 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/orchard_trees@testnet_2.snap @@ -7,7 +7,9 @@ expression: stored_orchard_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Base( + bytes: (174, 41, 53, 241, 223, 216, 162, 74, 237, 124, 112, 223, 125, 227, 166, 104, 235, 122, 73, 177, 49, 152, 128, 221, 226, 187, 217, 3, 26, 229, 216, 47), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_1.snap index fd27c14835e..268442af99a 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_1.snap @@ -7,7 +7,9 @@ expression: stored_sapling_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Fq( + bytes: (251, 194, 244, 48, 12, 1, 240, 183, 130, 13, 0, 227, 52, 124, 141, 164, 238, 97, 70, 116, 55, 108, 188, 69, 53, 157, 170, 84, 249, 181, 73, 62), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_2.snap index 056e581b74f..0655ffbe372 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_2.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@mainnet_2.snap @@ -7,7 +7,9 @@ expression: stored_sapling_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Fq( + bytes: (251, 194, 244, 48, 12, 1, 240, 183, 130, 13, 0, 227, 52, 124, 141, 164, 238, 97, 70, 116, 55, 108, 188, 69, 53, 157, 170, 84, 249, 181, 73, 62), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_1.snap index fd27c14835e..268442af99a 100644 --- 
a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_1.snap @@ -7,7 +7,9 @@ expression: stored_sapling_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Fq( + bytes: (251, 194, 244, 48, 12, 1, 240, 183, 130, 13, 0, 227, 52, 124, 141, 164, 238, 97, 70, 116, 55, 108, 188, 69, 53, 157, 170, 84, 249, 181, 73, 62), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_2.snap index 056e581b74f..0655ffbe372 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sapling_trees@testnet_2.snap @@ -7,7 +7,9 @@ expression: stored_sapling_trees inner: Frontier( frontier: None, ), - cached_root: None, + cached_root: Some(Root(Fq( + bytes: (251, 194, 244, 48, 12, 1, 240, 183, 130, 13, 0, 227, 52, 124, 141, 164, 238, 97, 70, 116, 55, 108, 188, 69, 53, 157, 170, 84, 249, 181, 73, 62), + ))), )), (Height(1), NoteCommitmentTree( inner: Frontier( diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_0.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_0.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- +NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_1.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_1.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- +NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_2.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@mainnet_2.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- +NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + 
cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_0.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_0.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- +NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_1.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_1.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- +NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_2.snap new file mode 100644 index 00000000000..b18353999df --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_tree_at_tip@testnet_2.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: sprout_tree_at_tip +--- +NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), +) diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_0.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_0.snap new file mode 100644 index 00000000000..fc004eddd5a --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_0.snap @@ -0,0 +1,5 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: stored_sprout_trees +--- +{} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_1.snap new file mode 100644 index 00000000000..438e0809a21 --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_1.snap @@ -0,0 +1,12 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs 
+expression: stored_sprout_trees +--- +{ + Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89)): NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), + ), +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_2.snap new file mode 100644 index 00000000000..438e0809a21 --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_2.snap @@ -0,0 +1,12 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: stored_sprout_trees +--- +{ + Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89)): NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), + ), +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_0.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_0.snap new file mode 100644 index 00000000000..fc004eddd5a --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_0.snap @@ -0,0 +1,5 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: stored_sprout_trees +--- +{} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_1.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_1.snap new file mode 100644 index 00000000000..438e0809a21 --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_1.snap @@ -0,0 +1,12 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: stored_sprout_trees +--- +{ + Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89)): NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), + ), +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_2.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_2.snap new file mode 100644 index 00000000000..438e0809a21 --- /dev/null +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_2.snap @@ -0,0 +1,12 @@ +--- +source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs +expression: stored_sprout_trees +--- +{ + Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89)): NoteCommitmentTree( + inner: 
Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), + ), +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs b/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs index 51bad905da2..ea4a623748f 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/vectors.rs @@ -27,7 +27,7 @@ use zebra_test::vectors::{MAINNET_BLOCKS, TESTNET_BLOCKS}; use crate::{ service::finalized_state::{disk_db::DiskWriteBatch, FinalizedState}, - Config, FinalizedBlock, + CheckpointVerifiedBlock, Config, }; /// Storage round-trip test for block and transaction data in the finalized state database. @@ -112,7 +112,7 @@ fn test_block_db_round_trip_with( original_block.clone().into() } else { // Fake a zero height - FinalizedBlock::with_hash_and_height( + CheckpointVerifiedBlock::with_hash_and_height( original_block.clone(), original_block.hash(), Height(0), diff --git a/zebra-state/src/service/finalized_state/zebra_db/chain.rs b/zebra-state/src/service/finalized_state/zebra_db/chain.rs index a9a63cb3fbf..7107717a466 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/chain.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/chain.rs @@ -21,12 +21,12 @@ use zebra_chain::{ }; use crate::{ + request::SemanticallyVerifiedBlockWithTrees, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, zebra_db::ZebraDb, - FinalizedBlock, }, - BoxError, + BoxError, SemanticallyVerifiedBlock, }; impl ZebraDb { @@ -70,15 +70,14 @@ impl DiskWriteBatch { pub fn prepare_history_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, - history_tree: Arc, + finalized: &SemanticallyVerifiedBlockWithTrees, ) -> Result<(), BoxError> { let history_tree_cf = db.cf_handle("history_tree").unwrap(); - let FinalizedBlock { height, .. } = finalized; + let height = finalized.verified.height; // Update the tree in state - let current_tip_height = *height - 1; + let current_tip_height = height - 1; if let Some(h) = current_tip_height { self.zs_delete(&history_tree_cf, h); } @@ -88,7 +87,7 @@ impl DiskWriteBatch { // Otherwise, the ReadStateService could access a height // that was just deleted by a concurrent StateService write. // This requires a database version update. - if let Some(history_tree) = history_tree.as_ref().as_ref() { + if let Some(history_tree) = finalized.treestate.history_tree.as_ref().as_ref() { self.zs_insert(&history_tree_cf, height, history_tree); } @@ -108,13 +107,13 @@ impl DiskWriteBatch { pub fn prepare_chain_value_pools_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, + finalized: &SemanticallyVerifiedBlock, utxos_spent_by_block: HashMap, value_pool: ValueBalance, ) -> Result<(), BoxError> { let tip_chain_value_pool = db.cf_handle("tip_chain_value_pool").unwrap(); - let FinalizedBlock { block, .. } = finalized; + let SemanticallyVerifiedBlock { block, .. 
} = finalized; let new_pool = value_pool.add_block(block.borrow(), &utxos_spent_by_block)?; self.zs_insert(&tip_chain_value_pool, (), new_pool); diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index 42803585e49..c3cf3666423 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -12,20 +12,20 @@ //! The [`crate::constants::DATABASE_FORMAT_VERSION`] constant must //! be incremented each time the database format (column, serialization, etc) changes. -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use zebra_chain::{ - block::Height, history_tree::HistoryTree, orchard, parallel::tree::NoteCommitmentTrees, - sapling, sprout, transaction::Transaction, + block::Height, orchard, parallel::tree::NoteCommitmentTrees, sapling, sprout, + transaction::Transaction, }; use crate::{ + request::SemanticallyVerifiedBlockWithTrees, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, zebra_db::ZebraDb, - FinalizedBlock, }, - BoxError, + BoxError, SemanticallyVerifiedBlock, }; impl ZebraDb { @@ -70,7 +70,7 @@ impl ZebraDb { /// Returns the Sprout note commitment tree of the finalized tip /// or the empty tree if the state is empty. - pub fn sprout_note_commitment_tree(&self) -> Arc { + pub fn sprout_tree(&self) -> Arc { let height = match self.finalized_tip_height() { Some(h) => h, None => return Default::default(), @@ -88,7 +88,7 @@ impl ZebraDb { /// /// This is used for interstitial tree building, which is unique to Sprout. #[allow(clippy::unwrap_in_result)] - pub fn sprout_note_commitment_tree_by_anchor( + pub fn sprout_tree_by_anchor( &self, sprout_anchor: &sprout::tree::Root, ) -> Option> { @@ -99,69 +99,113 @@ impl ZebraDb { .map(Arc::new) } + /// Returns all the Sprout note commitment trees in the database. + /// + /// Calling this method can load a lot of data into RAM, and delay block commit transactions. + #[allow(dead_code, clippy::unwrap_in_result)] + pub fn sprout_trees_full_map( + &self, + ) -> HashMap> { + let sprout_anchors_handle = self.db.cf_handle("sprout_anchors").unwrap(); + + self.db + .zs_items_in_range_unordered(&sprout_anchors_handle, ..) + } + /// Returns the Sapling note commitment tree of the finalized tip /// or the empty tree if the state is empty. - pub fn sapling_note_commitment_tree(&self) -> Arc { + pub fn sapling_tree(&self) -> Arc { let height = match self.finalized_tip_height() { Some(h) => h, None => return Default::default(), }; - let sapling_nct_handle = self.db.cf_handle("sapling_note_commitment_tree").unwrap(); - - self.db - .zs_get(&sapling_nct_handle, &height) - .map(Arc::new) + self.sapling_tree_by_height(&height) .expect("Sapling note commitment tree must exist if there is a finalized tip") } - /// Returns the Sapling note commitment tree matching the given block height. - #[allow(dead_code)] + /// Returns the Sapling note commitment tree matching the given block height, + /// or `None` if the height is above the finalized tip. #[allow(clippy::unwrap_in_result)] - pub fn sapling_note_commitment_tree_by_height( + pub fn sapling_tree_by_height( &self, height: &Height, ) -> Option> { + let tip_height = self.finalized_tip_height()?; + + // If we're above the tip, searching backwards would always return the tip tree. + // But the correct answer is "we don't know that tree yet". 
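Note: the lookup pattern introduced here (and mirrored for the in-memory `Chain` later in this diff) is the read side of tree deduplication: trees are stored only at the heights where they change, so a query searches backwards for the nearest stored tree at or below the requested height. A minimal in-memory sketch of that lookup, with a hypothetical `tree_at` helper standing in for the RocksDB-backed `zs_prev_key_value_back_from` call in the hunk below:

```rust
use std::collections::BTreeMap;

/// Illustration only: deduplicated tree lookup over an in-memory map.
fn tree_at<T: Clone>(trees: &BTreeMap<u32, T>, height: u32, tip_height: u32) -> Option<T> {
    // Above the tip, a backwards search would wrongly return the tip tree,
    // but the correct answer is "we don't know that tree yet".
    if height > tip_height {
        return None;
    }

    // Search backwards for the nearest stored tree at or below `height`.
    trees
        .range(..=height)
        .next_back()
        .map(|(_stored_height, tree)| tree.clone())
}
```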
+ if *height > tip_height { + return None; + } + let sapling_trees = self.db.cf_handle("sapling_note_commitment_tree").unwrap(); - self.db.zs_get(&sapling_trees, height).map(Arc::new) + // If we know there must be a tree, search backwards for it. + // + // # Compatibility + // + // Allow older Zebra versions to read future database formats, after note commitment trees + // have been deduplicated. See ticket #6642 for details. + let (_first_duplicate_height, tree) = self + .db + .zs_prev_key_value_back_from(&sapling_trees, height) + .expect( + "Sapling note commitment trees must exist for all heights below the finalized tip", + ); + + Some(Arc::new(tree)) } /// Returns the Orchard note commitment tree of the finalized tip /// or the empty tree if the state is empty. - pub fn orchard_note_commitment_tree(&self) -> Arc { + pub fn orchard_tree(&self) -> Arc { let height = match self.finalized_tip_height() { Some(h) => h, None => return Default::default(), }; - let orchard_nct_handle = self.db.cf_handle("orchard_note_commitment_tree").unwrap(); - - self.db - .zs_get(&orchard_nct_handle, &height) - .map(Arc::new) + self.orchard_tree_by_height(&height) .expect("Orchard note commitment tree must exist if there is a finalized tip") } - /// Returns the Orchard note commitment tree matching the given block height. - #[allow(dead_code)] + /// Returns the Orchard note commitment tree matching the given block height, + /// or `None` if the height is above the finalized tip. #[allow(clippy::unwrap_in_result)] - pub fn orchard_note_commitment_tree_by_height( + pub fn orchard_tree_by_height( &self, height: &Height, ) -> Option> { + let tip_height = self.finalized_tip_height()?; + + // If we're above the tip, searching backwards would always return the tip tree. + // But the correct answer is "we don't know that tree yet". + if *height > tip_height { + return None; + } + let orchard_trees = self.db.cf_handle("orchard_note_commitment_tree").unwrap(); - self.db.zs_get(&orchard_trees, height).map(Arc::new) + // # Compatibility + // + // Allow older Zebra versions to read future database formats. See ticket #6642 for details. + let (_first_duplicate_height, tree) = self + .db + .zs_prev_key_value_back_from(&orchard_trees, height) + .expect( + "Orchard note commitment trees must exist for all heights below the finalized tip", + ); + + Some(Arc::new(tree)) } /// Returns the shielded note commitment trees of the finalized tip /// or the empty trees if the state is empty. pub fn note_commitment_trees(&self) -> NoteCommitmentTrees { NoteCommitmentTrees { - sprout: self.sprout_note_commitment_tree(), - sapling: self.sapling_note_commitment_tree(), - orchard: self.orchard_note_commitment_tree(), + sprout: self.sprout_tree(), + sapling: self.sapling_tree(), + orchard: self.orchard_tree(), } } } @@ -179,9 +223,9 @@ impl DiskWriteBatch { pub fn prepare_shielded_transaction_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, + finalized: &SemanticallyVerifiedBlock, ) -> Result<(), BoxError> { - let FinalizedBlock { block, .. } = finalized; + let SemanticallyVerifiedBlock { block, .. 
} = finalized; // Index each transaction's shielded data for transaction in &block.transactions { @@ -231,98 +275,67 @@ impl DiskWriteBatch { /// /// - Propagates any errors from updating the history tree #[allow(clippy::unwrap_in_result)] - pub fn prepare_note_commitment_batch( + pub fn prepare_trees_batch( &mut self, - db: &DiskDb, - finalized: &FinalizedBlock, - note_commitment_trees: NoteCommitmentTrees, - history_tree: Arc, + zebra_db: &ZebraDb, + finalized: &SemanticallyVerifiedBlockWithTrees, + prev_note_commitment_trees: Option, ) -> Result<(), BoxError> { + let db = &zebra_db.db; + let sprout_anchors = db.cf_handle("sprout_anchors").unwrap(); let sapling_anchors = db.cf_handle("sapling_anchors").unwrap(); let orchard_anchors = db.cf_handle("orchard_anchors").unwrap(); - let sprout_note_commitment_tree_cf = db.cf_handle("sprout_note_commitment_tree").unwrap(); - let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); - let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); + let sprout_tree_cf = db.cf_handle("sprout_note_commitment_tree").unwrap(); + let sapling_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); + let orchard_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); - let FinalizedBlock { height, .. } = finalized; + let height = finalized.verified.height; + let trees = finalized.treestate.note_commitment_trees.clone(); // Use the cached values that were previously calculated in parallel. - let sprout_root = note_commitment_trees.sprout.root(); - let sapling_root = note_commitment_trees.sapling.root(); - let orchard_root = note_commitment_trees.orchard.root(); + let sprout_root = trees.sprout.root(); + let sapling_root = trees.sapling.root(); + let orchard_root = trees.orchard.root(); // Index the new anchors. // Note: if the root hasn't changed, we write the same value again. - self.zs_insert(&sprout_anchors, sprout_root, ¬e_commitment_trees.sprout); + self.zs_insert(&sprout_anchors, sprout_root, &trees.sprout); self.zs_insert(&sapling_anchors, sapling_root, ()); self.zs_insert(&orchard_anchors, orchard_root, ()); // Delete the previously stored Sprout note commitment tree. - let current_tip_height = *height - 1; + let current_tip_height = height - 1; if let Some(h) = current_tip_height { - self.zs_delete(&sprout_note_commitment_tree_cf, h); + self.zs_delete(&sprout_tree_cf, h); } // TODO: if we ever need concurrent read-only access to the sprout tree, // store it by `()`, not height. Otherwise, the ReadStateService could // access a height that was just deleted by a concurrent StateService // write. This requires a database version update. - self.zs_insert( - &sprout_note_commitment_tree_cf, - height, - note_commitment_trees.sprout, - ); - - self.zs_insert( - &sapling_note_commitment_tree_cf, - height, - note_commitment_trees.sapling, - ); - - self.zs_insert( - &orchard_note_commitment_tree_cf, - height, - note_commitment_trees.orchard, - ); - - self.prepare_history_batch(db, finalized, history_tree) - } + self.zs_insert(&sprout_tree_cf, height, trees.sprout); + + // Store the Sapling tree only if it is not already present at the previous height. 
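Note: this guard (and the matching Orchard one just below) is the write side of the deduplication: a tree is inserted only when it differs from the tree at the previous height, so runs of blocks without shielded outputs share a single stored tree. A minimal sketch with a hypothetical `store_if_changed` helper, where `prev_tree` models the `prev_note_commitment_trees` cache handed from one block commit to the next:

```rust
use std::collections::BTreeMap;

/// Illustration only: write-side deduplication over an in-memory map.
fn store_if_changed<T: Clone + PartialEq>(
    trees: &mut BTreeMap<u32, T>,
    height: u32,
    tree: T,
    prev_tree: Option<T>,
) {
    // Fall back to a lookup when there is no cached previous tree, mirroring
    // the `map_or_else(|| zebra_db.sapling_tree(), ...)` calls below.
    let prev_tree = prev_tree.or_else(|| {
        trees
            .range(..height)
            .next_back()
            .map(|(_height, tree)| tree.clone())
    });

    // `height == 0` plays the role of `height.is_min()`: the first tree in
    // the chain is always stored.
    if height == 0 || prev_tree.as_ref() != Some(&tree) {
        trees.insert(height, tree);
    }
}
```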
+ if height.is_min() + || prev_note_commitment_trees + .as_ref() + .map_or_else(|| zebra_db.sapling_tree(), |trees| trees.sapling.clone()) + != trees.sapling + { + self.zs_insert(&sapling_tree_cf, height, trees.sapling); + } - /// Prepare a database batch containing the initial note commitment trees, - /// and return it (without actually writing anything). - /// - /// This method never returns an error. - pub fn prepare_genesis_note_commitment_tree_batch( - &mut self, - db: &DiskDb, - finalized: &FinalizedBlock, - ) { - let sprout_note_commitment_tree_cf = db.cf_handle("sprout_note_commitment_tree").unwrap(); - let sapling_note_commitment_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); - let orchard_note_commitment_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); - - let FinalizedBlock { height, .. } = finalized; - - // Insert empty note commitment trees. Note that these can't be - // used too early (e.g. the Orchard tree before Nu5 activates) - // since the block validation will make sure only appropriate - // transactions are allowed in a block. - self.zs_insert( - &sprout_note_commitment_tree_cf, - height, - sprout::tree::NoteCommitmentTree::default(), - ); - self.zs_insert( - &sapling_note_commitment_tree_cf, - height, - sapling::tree::NoteCommitmentTree::default(), - ); - self.zs_insert( - &orchard_note_commitment_tree_cf, - height, - orchard::tree::NoteCommitmentTree::default(), - ); + // Store the Orchard tree only if it is not already present at the previous height. + if height.is_min() + || prev_note_commitment_trees + .map_or_else(|| zebra_db.orchard_tree(), |trees| trees.orchard) + != trees.orchard + { + self.zs_insert(&orchard_tree_cf, height, trees.orchard); + } + + self.prepare_history_batch(db, finalized) } } diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index 6e2ac9808b4..9eda37a8888 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -35,7 +35,7 @@ use crate::{ }, zebra_db::ZebraDb, }, - BoxError, FinalizedBlock, + BoxError, SemanticallyVerifiedBlock, }; impl ZebraDb { @@ -369,13 +369,13 @@ impl DiskWriteBatch { pub fn prepare_transparent_transaction_batch( &mut self, db: &DiskDb, - finalized: &FinalizedBlock, + finalized: &SemanticallyVerifiedBlock, new_outputs_by_out_loc: &BTreeMap, spent_utxos_by_outpoint: &HashMap, spent_utxos_by_out_loc: &BTreeMap, mut address_balances: HashMap, ) -> Result<(), BoxError> { - let FinalizedBlock { block, height, .. } = finalized; + let SemanticallyVerifiedBlock { block, height, .. } = finalized; // Update created and spent transparent outputs self.prepare_new_transparent_outputs_batch( diff --git a/zebra-state/src/service/non_finalized_state.rs b/zebra-state/src/service/non_finalized_state.rs index 9beed6b1f0e..6b303360b6f 100644 --- a/zebra-state/src/service/non_finalized_state.rs +++ b/zebra-state/src/service/non_finalized_state.rs @@ -16,9 +16,9 @@ use zebra_chain::{ use crate::{ constants::MAX_NON_FINALIZED_CHAIN_FORKS, - request::{ContextuallyValidBlock, FinalizedWithTrees}, + request::{ContextuallyVerifiedBlock, FinalizableBlock}, service::{check, finalized_state::ZebraDb}, - PreparedBlock, ValidateContextError, + SemanticallyVerifiedBlock, ValidateContextError, }; mod chain; @@ -36,6 +36,8 @@ pub(crate) use chain::Chain; /// /// Most chain data is clone-on-write using [`Arc`]. 
pub struct NonFinalizedState { + // Chain Data + // /// Verified, non-finalized chains, in ascending work order. /// /// The best chain is [`NonFinalizedState::best_chain()`], or `chain_iter().next()`. @@ -43,6 +45,8 @@ pub struct NonFinalizedState { /// callers should migrate to `chain_iter().next()`. chain_set: BTreeSet>, + // Configuration + // /// The configured Zcash network. pub network: Network, @@ -174,7 +178,7 @@ impl NonFinalizedState { /// Finalize the lowest height block in the non-finalized portion of the best /// chain and update all side-chains to match. - pub fn finalize(&mut self) -> FinalizedWithTrees { + pub fn finalize(&mut self) -> FinalizableBlock { // Chain::cmp uses the partial cumulative work, and the hash of the tip block. // Neither of these fields has interior mutability. // (And when the tip block is dropped for a chain, the chain is also dropped.) @@ -226,7 +230,7 @@ impl NonFinalizedState { self.update_metrics_for_chains(); // Add the treestate to the finalized block. - FinalizedWithTrees::new(best_chain_root, root_treestate) + FinalizableBlock::new(best_chain_root, root_treestate) } /// Commit block to the non-finalized state, on top of: @@ -235,7 +239,7 @@ impl NonFinalizedState { #[tracing::instrument(level = "debug", skip(self, finalized_state, prepared))] pub fn commit_block( &mut self, - prepared: PreparedBlock, + prepared: SemanticallyVerifiedBlock, finalized_state: &ZebraDb, ) -> Result<(), ValidateContextError> { let parent_hash = prepared.block.header.previous_block_hash; @@ -266,7 +270,7 @@ impl NonFinalizedState { #[allow(clippy::unwrap_in_result)] pub fn commit_new_chain( &mut self, - prepared: PreparedBlock, + prepared: SemanticallyVerifiedBlock, finalized_state: &ZebraDb, ) -> Result<(), ValidateContextError> { let finalized_tip_height = finalized_state.finalized_tip_height(); @@ -280,9 +284,9 @@ impl NonFinalizedState { let chain = Chain::new( self.network, finalized_tip_height, - finalized_state.sprout_note_commitment_tree(), - finalized_state.sapling_note_commitment_tree(), - finalized_state.orchard_note_commitment_tree(), + finalized_state.sprout_tree(), + finalized_state.sapling_tree(), + finalized_state.orchard_tree(), finalized_state.history_tree(), finalized_state.finalized_value_pool(), ); @@ -308,7 +312,7 @@ impl NonFinalizedState { fn validate_and_commit( &self, new_chain: Arc, - prepared: PreparedBlock, + prepared: SemanticallyVerifiedBlock, finalized_state: &ZebraDb, ) -> Result, ValidateContextError> { // Reads from disk @@ -336,7 +340,7 @@ impl NonFinalizedState { ); // Quick check that doesn't read from disk - let contextual = ContextuallyValidBlock::with_block_and_spent_utxos( + let contextual = ContextuallyVerifiedBlock::with_block_and_spent_utxos( prepared.clone(), spent_utxos.clone(), ) @@ -358,7 +362,7 @@ impl NonFinalizedState { #[tracing::instrument(skip(new_chain, sprout_final_treestates))] fn validate_and_update_parallel( new_chain: Arc, - contextual: ContextuallyValidBlock, + contextual: ContextuallyVerifiedBlock, sprout_final_treestates: HashMap>, ) -> Result, ValidateContextError> { let mut block_commitment_result = None; @@ -399,6 +403,8 @@ impl NonFinalizedState { // Pushing a block onto a Chain can launch additional parallel batches. // TODO: should we pass _scope into Chain::push()? 
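Note: the task spawned in the next hunk needs an owned `Chain`, so it uses the try-unwrap-or-clone idiom: take the value out of the `Arc` when this is the last handle, and clone the shared data otherwise. A generic sketch of the idiom that the new TODO wants to replace with `Arc::unwrap_or_clone` once it stabilises:

```rust
use std::sync::Arc;

/// Illustration only: the stable equivalent of the unstable
/// `Arc::unwrap_or_clone` (https://github.com/rust-lang/rust/issues/93610).
fn unwrap_or_clone<T: Clone>(arc: Arc<T>) -> T {
    Arc::try_unwrap(arc).unwrap_or_else(|shared| (*shared).clone())
}
```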
scope.spawn_fifo(|_scope| { + // TODO: Replace with Arc::unwrap_or_clone() when it stabilises: + // https://github.com/rust-lang/rust/issues/93610 let new_chain = Arc::try_unwrap(new_chain) .unwrap_or_else(|shared_chain| (*shared_chain).clone()); chain_push_result = Some(new_chain.push(contextual).map(Arc::new)); @@ -489,7 +495,7 @@ impl NonFinalizedState { /// Returns the block at the tip of the best chain. #[allow(dead_code)] - pub fn best_tip_block(&self) -> Option<&ContextuallyValidBlock> { + pub fn best_tip_block(&self) -> Option<&ContextuallyVerifiedBlock> { let best_chain = self.best_chain()?; best_chain.tip_block() @@ -549,7 +555,7 @@ impl NonFinalizedState { /// Return the non-finalized portion of the current best chain. pub fn best_chain(&self) -> Option<&Arc> { - self.chain_set.iter().rev().next() + self.chain_iter().next() } /// Return the number of chains. @@ -651,7 +657,7 @@ impl NonFinalizedState { // Update the chain count bar if self.chain_count_bar.is_none() { - self.chain_count_bar = Some(howudoin::new().label("Chain Forks")); + self.chain_count_bar = Some(howudoin::new_root().label("Chain Forks")); } let chain_count_bar = self @@ -662,9 +668,8 @@ impl NonFinalizedState { .best_chain() .map(|chain| chain.non_finalized_root_height().0 - 1); - chain_count_bar - .set_pos(u64::try_from(self.chain_count()).expect("fits in u64")) - .set_len(u64::try_from(MAX_NON_FINALIZED_CHAIN_FORKS).expect("fits in u64")); + chain_count_bar.set_pos(u64::try_from(self.chain_count()).expect("fits in u64")); + // .set_len(u64::try_from(MAX_NON_FINALIZED_CHAIN_FORKS).expect("fits in u64")); if let Some(finalized_tip_height) = finalized_tip_height { chain_count_bar.desc(format!("Finalized Root {finalized_tip_height}")); @@ -676,9 +681,11 @@ impl NonFinalizedState { match self.chain_count().cmp(&prev_length_bars) { Greater => self .chain_fork_length_bars - .resize_with(self.chain_count(), howudoin::new), + .resize_with(self.chain_count(), || { + howudoin::new_with_parent(chain_count_bar.id()) + }), Less => { - let redundant_bars = self.chain_fork_length_bars.split_off(prev_length_bars); + let redundant_bars = self.chain_fork_length_bars.split_off(self.chain_count()); for bar in redundant_bars { bar.close(); } @@ -701,24 +708,30 @@ impl NonFinalizedState { // - the chain this bar was previously assigned to might have changed position. chain_length_bar .label(format!("Fork {fork_height}")) - .set_pos(u64::try_from(chain.len()).expect("fits in u64")) - .set_len(u64::from( - zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY, - )); - - // display work as bits - let mut desc = format!( - "Work {:.1} bits", - chain.partial_cumulative_work.difficulty_bits_for_display(), - ); + .set_pos(u64::try_from(chain.len()).expect("fits in u64")); + // TODO: should this be MAX_BLOCK_REORG_HEIGHT? + // .set_len(u64::from( + // zebra_chain::transparent::MIN_TRANSPARENT_COINBASE_MATURITY, + // )); + + // TODO: store work in the finalized state for each height (#7109), + // and show the full chain work here, like `zcashd` (#7110) + // + // For now, we don't show any work here, see the deleted code in PR #7087. 
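Note: the progress-bar changes in this hunk attach each fork-length bar to the chain-count bar instead of creating free-standing bars. A sketch of that hierarchy using only the `howudoin` calls that appear in this diff (the bar handle type is left inferred, since it is not named here):

```rust
/// Illustration only: one root bar for the chain count, one child per fork.
fn demo_chain_bars(chain_count: usize) {
    let chain_count_bar = howudoin::new_root().label("Chain Forks");
    chain_count_bar.set_pos(u64::try_from(chain_count).expect("fits in u64"));

    // Child bars are attached to the root via its id, so terminals can
    // render fork lengths nested under the chain count.
    let fork_bars: Vec<_> = (0..chain_count)
        .map(|_| howudoin::new_with_parent(chain_count_bar.id()))
        .collect();

    // Bars for forks that no longer exist are closed rather than leaked.
    for bar in fork_bars {
        bar.close();
    }
}
```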
+ let mut desc = String::new(); if let Some(recent_fork_height) = chain.recent_fork_height() { let recent_fork_length = chain .recent_fork_length() .expect("just checked recent fork height"); + let mut plural = "s"; + if recent_fork_length == 1 { + plural = ""; + } + desc.push_str(&format!( - " at {recent_fork_height:?} + {recent_fork_length} blocks" + " at {recent_fork_height:?} + {recent_fork_length} block{plural}" )); } diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 3913fc4d669..8400ae0a222 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -28,7 +28,7 @@ use zebra_chain::{ }; use crate::{ - request::Treestate, service::check, ContextuallyValidBlock, HashOrHeight, OutputLocation, + request::Treestate, service::check, ContextuallyVerifiedBlock, HashOrHeight, OutputLocation, TransactionLocation, ValidateContextError, }; @@ -50,7 +50,7 @@ pub struct Chain { // Blocks, heights, hashes, and transaction locations // /// The contextually valid blocks which form this non-finalized partial chain, in height order. - pub(crate) blocks: BTreeMap, + pub(crate) blocks: BTreeMap, /// An index of block heights for each block hash in `blocks`. pub height_by_hash: HashMap, @@ -318,10 +318,10 @@ impl Chain { /// /// If the block is invalid, drops this chain, and returns an error. /// - /// Note: a [`ContextuallyValidBlock`] isn't actually contextually valid until + /// Note: a [`ContextuallyVerifiedBlock`] isn't actually contextually valid until /// [`Self::update_chain_tip_with`] returns success. #[instrument(level = "debug", skip(self, block), fields(block = %block.block))] - pub fn push(mut self, block: ContextuallyValidBlock) -> Result { + pub fn push(mut self, block: ContextuallyVerifiedBlock) -> Result { // update cumulative data members self.update_chain_tip_with(&block)?; @@ -334,7 +334,7 @@ impl Chain { /// Pops the lowest height block of the non-finalized portion of a chain, /// and returns it with its associated treestate. #[instrument(level = "debug", skip(self))] - pub(crate) fn pop_root(&mut self) -> (ContextuallyValidBlock, Treestate) { + pub(crate) fn pop_root(&mut self) -> (ContextuallyVerifiedBlock, Treestate) { // Obtain the lowest height. let block_height = self.non_finalized_root_height(); @@ -388,9 +388,9 @@ impl Chain { self.network } - /// Returns the [`ContextuallyValidBlock`] with [`block::Hash`] or + /// Returns the [`ContextuallyVerifiedBlock`] with [`block::Hash`] or /// [`Height`](zebra_chain::block::Height), if it exists in this chain. - pub fn block(&self, hash_or_height: HashOrHeight) -> Option<&ContextuallyValidBlock> { + pub fn block(&self, hash_or_height: HashOrHeight) -> Option<&ContextuallyVerifiedBlock> { let height = hash_or_height.height_or_else(|hash| self.height_by_hash.get(&hash).cloned())?; @@ -500,14 +500,25 @@ impl Chain { let height = hash_or_height.height_or_else(|hash| self.height_by_hash.get(&hash).cloned())?; - self.sprout_trees_by_height.get(&height).cloned() + self.sprout_trees_by_height + .range(..=height) + .next_back() + .map(|(_height, tree)| tree.clone()) } - /// Add the Sprout `tree` to the tree and anchor indexes at `height`. + /// Adds the Sprout `tree` to the tree and anchor indexes at `height`. 
/// /// `height` can be either: + /// /// - the height of a new block that has just been added to the chain tip, or - /// - the finalized tip height: the height of the parent of the first block of a new chain. + /// - the finalized tip height—the height of the parent of the first block of a new chain. + /// + /// Stores only the first tree in each series of identical trees. + /// + /// # Panics + /// + /// - If there's a tree already stored at `height`. + /// - If there's an anchor already stored at `height`. fn add_sprout_tree_and_anchor( &mut self, height: Height, @@ -521,26 +532,25 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding sprout tree"); - // TODO: fix test code that incorrectly overwrites trees - #[cfg(not(test))] + // Don't add a new tree unless it differs from the previous one or there's no previous tree. + if height.is_min() + || self + .sprout_tree(height.previous().into()) + .map_or(true, |prev_tree| prev_tree != tree) { assert_eq!( self.sprout_trees_by_height.insert(height, tree.clone()), None, "incorrect overwrite of sprout tree: trees must be reverted then inserted", ); - assert_eq!( - self.sprout_anchors_by_height.insert(height, anchor), - None, - "incorrect overwrite of sprout anchor: anchors must be reverted then inserted", - ); } - #[cfg(test)] - { - self.sprout_trees_by_height.insert(height, tree.clone()); - self.sprout_anchors_by_height.insert(height, anchor); - } + // Store the root. + assert_eq!( + self.sprout_anchors_by_height.insert(height, anchor), + None, + "incorrect overwrite of sprout anchor: anchors must be reverted then inserted", + ); // Multiple inserts are expected here, // because the anchors only change if a block has shielded transactions. @@ -548,24 +558,35 @@ impl Chain { self.sprout_trees_by_anchor.insert(anchor, tree); } - /// Remove the Sprout tree and anchor indexes at `height`. + /// Removes the Sprout tree and anchor indexes at `height`. /// /// `height` can be at two different [`RevertPosition`]s in the chain: - /// - a tip block above a chain fork: only that height is removed, or - /// - a root block: all trees and anchors below that height are removed, - /// including temporary finalized tip trees. + /// + /// - a tip block above a chain fork—only the tree and anchor at that height are removed, or + /// - a root block—all trees and anchors at and below that height are removed, including + /// temporary finalized tip trees. + /// + /// # Panics + /// + /// - If the anchor being removed is not present. + /// - If there is no tree at `height`. fn remove_sprout_tree_and_anchor(&mut self, position: RevertPosition, height: Height) { - let removed_heights: Vec = if position == RevertPosition::Root { - // Remove all trees and anchors at or below the removed block. - // This makes sure the temporary trees from finalized tip forks are removed. - self.sprout_anchors_by_height - .keys() - .cloned() - .filter(|index_height| *index_height <= height) - .collect() + let (removed_heights, highest_removed_tree) = if position == RevertPosition::Root { + ( + // Remove all trees and anchors at or below the removed block. + // This makes sure the temporary trees from finalized tip forks are removed. + self.sprout_anchors_by_height + .keys() + .cloned() + .filter(|index_height| *index_height <= height) + .collect(), + // Cache the highest (rightmost) tree before its removal. + self.sprout_tree(height.into()), + ) } else { // Just remove the reverted tip trees and anchors. 
- vec![height] + // We don't need to cache the highest (rightmost) tree. + (vec![height], None) }; for height in &removed_heights { @@ -573,9 +594,8 @@ impl Chain { .sprout_anchors_by_height .remove(height) .expect("Sprout anchor must be present if block was added to chain"); - self.sprout_trees_by_height - .remove(height) - .expect("Sprout note commitment tree must be present if block was added to chain"); + + self.sprout_trees_by_height.remove(height); trace!(?height, ?position, ?anchor, "removing sprout tree"); @@ -589,6 +609,26 @@ impl Chain { self.sprout_trees_by_anchor.remove(&anchor); } } + + // # Invariant + // + // The height following after the removed heights in a non-empty non-finalized state must + // always have its tree. + // + // The loop above can violate the invariant, and if `position` is [`RevertPosition::Root`], + // it will always violate the invariant. We restore the invariant by storing the highest + // (rightmost) removed tree just above `height` if there is no tree at that height. + if !self.is_empty() && height < self.non_finalized_tip_height() { + let next_height = height.next(); + + if self.sprout_trees_by_height.get(&next_height).is_none() { + // TODO: Use `try_insert` once it stabilises. + self.sprout_trees_by_height.insert( + next_height, + highest_removed_tree.expect("There should be a cached removed tree."), + ); + } + } } /// Returns the Sapling note commitment tree of the tip of this [`Chain`], @@ -617,14 +657,25 @@ impl Chain { let height = hash_or_height.height_or_else(|hash| self.height_by_hash.get(&hash).cloned())?; - self.sapling_trees_by_height.get(&height).cloned() + self.sapling_trees_by_height + .range(..=height) + .next_back() + .map(|(_height, tree)| tree.clone()) } - /// Add the Sapling `tree` to the tree and anchor indexes at `height`. + /// Adds the Sapling `tree` to the tree and anchor indexes at `height`. /// /// `height` can be either: + /// /// - the height of a new block that has just been added to the chain tip, or - /// - the finalized tip height: the height of the parent of the first block of a new chain. + /// - the finalized tip height—the height of the parent of the first block of a new chain. + /// + /// Stores only the first tree in each series of identical trees. + /// + /// # Panics + /// + /// - If there's a tree already stored at `height`. + /// - If there's an anchor already stored at `height`. fn add_sapling_tree_and_anchor( &mut self, height: Height, @@ -633,50 +684,60 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding sapling tree"); - // TODO: fix test code that incorrectly overwrites trees - #[cfg(not(test))] + // Don't add a new tree unless it differs from the previous one or there's no previous tree. + if height.is_min() + || self + .sapling_tree(height.previous().into()) + .map_or(true, |prev_tree| prev_tree != tree) { assert_eq!( self.sapling_trees_by_height.insert(height, tree), None, "incorrect overwrite of sapling tree: trees must be reverted then inserted", ); - assert_eq!( - self.sapling_anchors_by_height.insert(height, anchor), - None, - "incorrect overwrite of sapling anchor: anchors must be reverted then inserted", - ); } - #[cfg(test)] - { - self.sapling_trees_by_height.insert(height, tree); - self.sapling_anchors_by_height.insert(height, anchor); - } + // Store the root. 
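Note: the `# Invariant` block above restores the property that, in a deduplicated index, the height just after the removed range must still resolve to a tree; after a root revert, that tree may have been stored at one of the removed heights. The `try_insert` TODO refers to the unstable `BTreeMap::try_insert`, which only inserts into a vacant slot; the stable `get`/`insert` pair has the same effect here. A minimal in-memory sketch of the whole removal (the same pattern repeats for Sapling and Orchard below):

```rust
use std::collections::BTreeMap;

/// Illustration only: root revert of a deduplicated tree index.
fn remove_at_or_below<T: Clone>(trees: &mut BTreeMap<u32, T>, height: u32, tip_height: u32) {
    // Cache the highest (rightmost) tree before removing anything.
    let highest_removed_tree = trees
        .range(..=height)
        .next_back()
        .map(|(_height, tree)| tree.clone());

    let removed_heights: Vec<u32> = trees.range(..=height).map(|(h, _)| *h).collect();
    for removed in removed_heights {
        trees.remove(&removed);
    }

    // Restore the invariant: if the next height has no tree of its own, its
    // tree was deduplicated into the removed range, so re-insert it there.
    let next_height = height + 1;
    if next_height <= tip_height && !trees.contains_key(&next_height) {
        trees.insert(
            next_height,
            highest_removed_tree.expect("there should be a cached removed tree"),
        );
    }
}
```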
+ assert_eq!( + self.sapling_anchors_by_height.insert(height, anchor), + None, + "incorrect overwrite of sapling anchor: anchors must be reverted then inserted", + ); // Multiple inserts are expected here, // because the anchors only change if a block has shielded transactions. self.sapling_anchors.insert(anchor); } - /// Remove the Sapling tree and anchor indexes at `height`. + /// Removes the Sapling tree and anchor indexes at `height`. /// /// `height` can be at two different [`RevertPosition`]s in the chain: - /// - a tip block above a chain fork: only that height is removed, or - /// - a root block: all trees and anchors below that height are removed, - /// including temporary finalized tip trees. + /// + /// - a tip block above a chain fork—only the tree and anchor at that height are removed, or + /// - a root block—all trees and anchors at and below that height are removed, including + /// temporary finalized tip trees. + /// + /// # Panics + /// + /// - If the anchor being removed is not present. + /// - If there is no tree at `height`. fn remove_sapling_tree_and_anchor(&mut self, position: RevertPosition, height: Height) { - let removed_heights: Vec = if position == RevertPosition::Root { - // Remove all trees and anchors at or below the removed block. - // This makes sure the temporary trees from finalized tip forks are removed. - self.sapling_anchors_by_height - .keys() - .cloned() - .filter(|index_height| *index_height <= height) - .collect() + let (removed_heights, highest_removed_tree) = if position == RevertPosition::Root { + ( + // Remove all trees and anchors at or below the removed block. + // This makes sure the temporary trees from finalized tip forks are removed. + self.sapling_anchors_by_height + .keys() + .cloned() + .filter(|index_height| *index_height <= height) + .collect(), + // Cache the highest (rightmost) tree before its removal. + self.sapling_tree(height.into()), + ) } else { // Just remove the reverted tip trees and anchors. - vec![height] + // We don't need to cache the highest (rightmost) tree. + (vec![height], None) }; for height in &removed_heights { @@ -684,9 +745,8 @@ impl Chain { .sapling_anchors_by_height .remove(height) .expect("Sapling anchor must be present if block was added to chain"); - self.sapling_trees_by_height - .remove(height) - .expect("Sapling note commitment tree must be present if block was added to chain"); + + self.sapling_trees_by_height.remove(height); trace!(?height, ?position, ?anchor, "removing sapling tree"); @@ -697,6 +757,26 @@ impl Chain { "Sapling anchor must be present if block was added to chain" ); } + + // # Invariant + // + // The height following after the removed heights in a non-empty non-finalized state must + // always have its tree. + // + // The loop above can violate the invariant, and if `position` is [`RevertPosition::Root`], + // it will always violate the invariant. We restore the invariant by storing the highest + // (rightmost) removed tree just above `height` if there is no tree at that height. + if !self.is_empty() && height < self.non_finalized_tip_height() { + let next_height = height.next(); + + if self.sapling_trees_by_height.get(&next_height).is_none() { + // TODO: Use `try_insert` once it stabilises. 
+ self.sapling_trees_by_height.insert( + next_height, + highest_removed_tree.expect("There should be a cached removed tree."), + ); + } + } } /// Returns the Orchard note commitment tree of the tip of this [`Chain`], @@ -726,14 +806,25 @@ impl Chain { let height = hash_or_height.height_or_else(|hash| self.height_by_hash.get(&hash).cloned())?; - self.orchard_trees_by_height.get(&height).cloned() + self.orchard_trees_by_height + .range(..=height) + .next_back() + .map(|(_height, tree)| tree.clone()) } - /// Add the Orchard `tree` to the tree and anchor indexes at `height`. + /// Adds the Orchard `tree` to the tree and anchor indexes at `height`. /// /// `height` can be either: + /// /// - the height of a new block that has just been added to the chain tip, or - /// - the finalized tip height: the height of the parent of the first block of a new chain. + /// - the finalized tip height—the height of the parent of the first block of a new chain. + /// + /// Stores only the first tree in each series of identical trees. + /// + /// # Panics + /// + /// - If there's a tree already stored at `height`. + /// - If there's an anchor already stored at `height`. fn add_orchard_tree_and_anchor( &mut self, height: Height, @@ -747,50 +838,60 @@ impl Chain { let anchor = tree.root(); trace!(?height, ?anchor, "adding orchard tree"); - // TODO: fix test code that incorrectly overwrites trees - #[cfg(not(test))] + // Don't add a new tree unless it differs from the previous one or there's no previous tree. + if height.is_min() + || self + .orchard_tree(height.previous().into()) + .map_or(true, |prev_tree| prev_tree != tree) { assert_eq!( self.orchard_trees_by_height.insert(height, tree), None, "incorrect overwrite of orchard tree: trees must be reverted then inserted", ); - assert_eq!( - self.orchard_anchors_by_height.insert(height, anchor), - None, - "incorrect overwrite of orchard anchor: anchors must be reverted then inserted", - ); } - #[cfg(test)] - { - self.orchard_trees_by_height.insert(height, tree); - self.orchard_anchors_by_height.insert(height, anchor); - } + // Store the root. + assert_eq!( + self.orchard_anchors_by_height.insert(height, anchor), + None, + "incorrect overwrite of orchard anchor: anchors must be reverted then inserted", + ); // Multiple inserts are expected here, // because the anchors only change if a block has shielded transactions. self.orchard_anchors.insert(anchor); } - /// Remove the Orchard tree and anchor indexes at `height`. + /// Removes the Orchard tree and anchor indexes at `height`. /// /// `height` can be at two different [`RevertPosition`]s in the chain: - /// - a tip block above a chain fork: only that height is removed, or - /// - a root block: all trees and anchors below that height are removed, - /// including temporary finalized tip trees. + /// + /// - a tip block above a chain fork—only the tree and anchor at that height are removed, or + /// - a root block—all trees and anchors at and below that height are removed, including + /// temporary finalized tip trees. + /// + /// # Panics + /// + /// - If the anchor being removed is not present. + /// - If there is no tree at `height`. fn remove_orchard_tree_and_anchor(&mut self, position: RevertPosition, height: Height) { - let removed_heights: Vec = if position == RevertPosition::Root { - // Remove all trees and anchors at or below the removed block. - // This makes sure the temporary trees from finalized tip forks are removed. 
- self.orchard_anchors_by_height - .keys() - .cloned() - .filter(|index_height| *index_height <= height) - .collect() + let (removed_heights, highest_removed_tree) = if position == RevertPosition::Root { + ( + // Remove all trees and anchors at or below the removed block. + // This makes sure the temporary trees from finalized tip forks are removed. + self.orchard_anchors_by_height + .keys() + .cloned() + .filter(|index_height| *index_height <= height) + .collect(), + // Cache the highest (rightmost) tree before its removal. + self.orchard_tree(height.into()), + ) } else { // Just remove the reverted tip trees and anchors. - vec![height] + // We don't need to cache the highest (rightmost) tree. + (vec![height], None) }; for height in &removed_heights { @@ -798,9 +899,8 @@ impl Chain { .orchard_anchors_by_height .remove(height) .expect("Orchard anchor must be present if block was added to chain"); - self.orchard_trees_by_height - .remove(height) - .expect("Orchard note commitment tree must be present if block was added to chain"); + + self.orchard_trees_by_height.remove(height); trace!(?height, ?position, ?anchor, "removing orchard tree"); @@ -811,6 +911,26 @@ impl Chain { "Orchard anchor must be present if block was added to chain" ); } + + // # Invariant + // + // The height following after the removed heights in a non-empty non-finalized state must + // always have its tree. + // + // The loop above can violate the invariant, and if `position` is [`RevertPosition::Root`], + // it will always violate the invariant. We restore the invariant by storing the highest + // (rightmost) removed tree just above `height` if there is no tree at that height. + if !self.is_empty() && height < self.non_finalized_tip_height() { + let next_height = height.next(); + + if self.orchard_trees_by_height.get(&next_height).is_none() { + // TODO: Use `try_insert` once it stabilises. + self.orchard_trees_by_height.insert( + next_height, + highest_removed_tree.expect("There should be a cached removed tree."), + ); + } + } } /// Returns the History tree of the tip of this [`Chain`], @@ -850,16 +970,11 @@ impl Chain { // Use the previously cached root which was calculated in parallel. trace!(?height, "adding history tree"); - // TODO: fix test code that incorrectly overwrites trees - #[cfg(not(test))] assert_eq!( self.history_trees_by_height.insert(height, tree), None, "incorrect overwrite of history tree: trees must be reverted then inserted", ); - - #[cfg(test)] - self.history_trees_by_height.insert(height, tree); } /// Remove the History tree index at `height`. @@ -969,7 +1084,7 @@ impl Chain { /// Return the non-finalized tip block for this chain, /// or `None` if `self.blocks` is empty. - pub fn tip_block(&self) -> Option<&ContextuallyValidBlock> { + pub fn tip_block(&self) -> Option<&ContextuallyVerifiedBlock> { self.blocks.values().next_back() } @@ -1123,12 +1238,12 @@ impl Chain { /// Update the chain tip with the `contextually_valid` block, /// running note commitment tree updates in parallel with other updates. /// - /// Used to implement `update_chain_tip_with::`. + /// Used to implement `update_chain_tip_with::`. 
#[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))] #[allow(clippy::unwrap_in_result)] fn update_chain_tip_with_block_parallel( &mut self, - contextually_valid: &ContextuallyValidBlock, + contextually_valid: &ContextuallyVerifiedBlock, ) -> Result<(), ValidateContextError> { let height = contextually_valid.height; @@ -1186,12 +1301,12 @@ impl Chain { /// Update the chain tip with the `contextually_valid` block, /// except for the note commitment and history tree updates. /// - /// Used to implement `update_chain_tip_with::`. + /// Used to implement `update_chain_tip_with::`. #[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))] #[allow(clippy::unwrap_in_result)] fn update_chain_tip_with_block_except_trees( &mut self, - contextually_valid: &ContextuallyValidBlock, + contextually_valid: &ContextuallyVerifiedBlock, ) -> Result<(), ValidateContextError> { let ( block, @@ -1327,12 +1442,12 @@ trait UpdateWith { fn revert_chain_with(&mut self, _: &T, position: RevertPosition); } -impl UpdateWith for Chain { +impl UpdateWith for Chain { #[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))] #[allow(clippy::unwrap_in_result)] fn update_chain_tip_with( &mut self, - contextually_valid: &ContextuallyValidBlock, + contextually_valid: &ContextuallyVerifiedBlock, ) -> Result<(), ValidateContextError> { self.update_chain_tip_with_block_parallel(contextually_valid) } @@ -1340,7 +1455,7 @@ impl UpdateWith for Chain { #[instrument(skip(self, contextually_valid), fields(block = %contextually_valid.block))] fn revert_chain_with( &mut self, - contextually_valid: &ContextuallyValidBlock, + contextually_valid: &ContextuallyVerifiedBlock, position: RevertPosition, ) { let ( diff --git a/zebra-state/src/service/non_finalized_state/tests/prop.rs b/zebra-state/src/service/non_finalized_state/tests/prop.rs index 32bd8e300cf..ea72609bcc4 100644 --- a/zebra-state/src/service/non_finalized_state/tests/prop.rs +++ b/zebra-state/src/service/non_finalized_state/tests/prop.rs @@ -17,7 +17,7 @@ use zebra_chain::{ use crate::{ arbitrary::Prepare, - request::ContextuallyValidBlock, + request::ContextuallyVerifiedBlock, service::{ arbitrary::PreparedChain, finalized_state::FinalizedState, @@ -53,9 +53,9 @@ fn push_genesis_chain() -> Result<()> { chain_values.insert(None, (None, only_chain.chain_value_pools.into())); - for block in chain.iter().take(count).cloned() { + for block in chain.iter().take(count).skip(1).cloned() { let block = - ContextuallyValidBlock::with_block_and_spent_utxos( + ContextuallyVerifiedBlock::with_block_and_spent_utxos( block, only_chain.unspent_utxos(), ) @@ -72,7 +72,7 @@ fn push_genesis_chain() -> Result<()> { chain_values.insert(block.height.into(), (block.chain_value_pool_change.into(), only_chain.chain_value_pools.into())); } - prop_assert_eq!(only_chain.blocks.len(), count); + prop_assert_eq!(only_chain.blocks.len(), count - 1); }); Ok(()) @@ -104,7 +104,7 @@ fn push_history_tree_chain() -> Result<()> { for block in chain .iter() .take(count) - .map(ContextuallyValidBlock::test_with_zero_chain_pool_change) { + .map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) { only_chain = only_chain.push(block)?; } @@ -150,8 +150,8 @@ fn forked_equals_pushed_genesis() -> Result<()> { empty_tree.clone(), ValueBalance::zero(), ); - for block in chain.iter().take(fork_at_count).cloned() { - let block = ContextuallyValidBlock::with_block_and_spent_utxos( + for block in 
chain.iter().take(fork_at_count).skip(1).cloned() { + let block = ContextuallyVerifiedBlock::with_block_and_spent_utxos( block, partial_chain.unspent_utxos(), )?; @@ -170,14 +170,12 @@ fn forked_equals_pushed_genesis() -> Result<()> { empty_tree, ValueBalance::zero(), ); + for block in chain.iter().cloned() { let block = - ContextuallyValidBlock::with_block_and_spent_utxos(block, full_chain.unspent_utxos())?; - full_chain = full_chain - .push(block.clone()) - .expect("full chain push is valid"); + ContextuallyVerifiedBlock::with_block_and_spent_utxos(block, full_chain.unspent_utxos())?; - // Check some other properties of generated chains. + // Check some properties of the genesis block and don't push it to the chain. if block.height == block::Height(0) { prop_assert_eq!( block @@ -188,11 +186,13 @@ fn forked_equals_pushed_genesis() -> Result<()> { .filter_map(|i| i.outpoint()) .count(), 0, - "unexpected transparent prevout input at height {:?}: \ - genesis transparent outputs must be ignored, \ - so there can not be any spends in the genesis block", - block.height, + "Unexpected transparent prevout input at height 0. Genesis transparent outputs \ + must be ignored, so there can not be any spends in the genesis block.", ); + } else { + full_chain = full_chain + .push(block) + .expect("full chain push is valid"); } } @@ -216,7 +216,7 @@ fn forked_equals_pushed_genesis() -> Result<()> { // same original full chain. for block in chain.iter().skip(fork_at_count).cloned() { let block = - ContextuallyValidBlock::with_block_and_spent_utxos(block, forked.unspent_utxos())?; + ContextuallyVerifiedBlock::with_block_and_spent_utxos(block, forked.unspent_utxos())?; forked = forked.push(block).expect("forked chain push is valid"); } @@ -256,13 +256,13 @@ fn forked_equals_pushed_history_tree() -> Result<()> { for block in chain .iter() .take(fork_at_count) - .map(ContextuallyValidBlock::test_with_zero_chain_pool_change) { + .map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) { partial_chain = partial_chain.push(block)?; } for block in chain .iter() - .map(ContextuallyValidBlock::test_with_zero_chain_pool_change) { + .map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) { full_chain = full_chain.push(block.clone())?; } @@ -279,7 +279,7 @@ fn forked_equals_pushed_history_tree() -> Result<()> { for block in chain .iter() .skip(fork_at_count) - .map(ContextuallyValidBlock::test_with_zero_chain_pool_change) { + .map(ContextuallyVerifiedBlock::test_with_zero_chain_pool_change) { forked = forked.push(block)?; } @@ -310,7 +310,7 @@ fn finalized_equals_pushed_genesis() -> Result<()> { // TODO: fix this test or the code so the full_chain temporary trees aren't overwritten let chain = chain.iter() .filter(|block| block.height != Height(0)) - .map(ContextuallyValidBlock::test_with_zero_spent_utxos); + .map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos); // use `end_count` as the number of non-finalized blocks at the end of the chain, // make sure this test pushes at least 1 block in the partial chain. 
@@ -399,7 +399,7 @@ fn finalized_equals_pushed_history_tree() -> Result<()> { for block in chain .iter() .take(finalized_count) - .map(ContextuallyValidBlock::test_with_zero_spent_utxos) { + .map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos) { full_chain = full_chain.push(block)?; } @@ -416,14 +416,14 @@ fn finalized_equals_pushed_history_tree() -> Result<()> { for block in chain .iter() .skip(finalized_count) - .map(ContextuallyValidBlock::test_with_zero_spent_utxos) { + .map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos) { partial_chain = partial_chain.push(block.clone())?; } for block in chain .iter() .skip(finalized_count) - .map(ContextuallyValidBlock::test_with_zero_spent_utxos) { + .map(ContextuallyVerifiedBlock::test_with_zero_spent_utxos) { full_chain= full_chain.push(block.clone())?; } @@ -460,7 +460,7 @@ fn rejection_restores_internal_state_genesis() -> Result<()> { .unwrap_or(DEFAULT_PARTIAL_CHAIN_PROPTEST_CASES)), |((chain, valid_count, network, mut bad_block) in (PreparedChain::default(), any::(), any::()) .prop_flat_map(|((chain, valid_count, network, _history_tree), is_nu5, is_v5)| { - let next_height = chain[valid_count - 1].height; + let next_height = chain[valid_count].height; ( Just(chain), Just(valid_count), @@ -486,7 +486,7 @@ fn rejection_restores_internal_state_genesis() -> Result<()> { // use `valid_count` as the number of valid blocks before an invalid block let valid_tip_height = chain[valid_count - 1].height; let valid_tip_hash = chain[valid_count - 1].hash; - let mut chain = chain.iter().take(valid_count).cloned(); + let mut chain = chain.iter().take(valid_count).skip(1).cloned(); prop_assert!(state.eq_internal_state(&state)); diff --git a/zebra-state/src/service/non_finalized_state/tests/vectors.rs b/zebra-state/src/service/non_finalized_state/tests/vectors.rs index 23242fabfc3..34242be752a 100644 --- a/zebra-state/src/service/non_finalized_state/tests/vectors.rs +++ b/zebra-state/src/service/non_finalized_state/tests/vectors.rs @@ -65,6 +65,9 @@ fn construct_many() -> Result<()> { let mut block: Arc = zebra_test::vectors::BLOCK_MAINNET_434873_BYTES.zcash_deserialize_into()?; + let initial_height = block + .coinbase_height() + .expect("Block 434873 should have its height in its coinbase tx."); let mut blocks = vec![]; while blocks.len() < 100 { @@ -75,7 +78,7 @@ fn construct_many() -> Result<()> { let mut chain = Chain::new( Network::Mainnet, - Height(block.coinbase_height().unwrap().0 - 1), + (initial_height - 1).expect("Initial height should be at least 1."), Default::default(), Default::default(), Default::default(), @@ -213,13 +216,12 @@ fn finalize_pops_from_best_chain_for_network(network: Network) -> Result<()> { state.commit_block(block2.clone().prepare(), &finalized_state)?; state.commit_block(child.prepare(), &finalized_state)?; - let finalized_with_trees = state.finalize(); - let finalized = finalized_with_trees.finalized; - assert_eq!(block1, finalized.block); + let finalized = state.finalize().inner_block(); - let finalized_with_trees = state.finalize(); - let finalized = finalized_with_trees.finalized; - assert_eq!(block2, finalized.block); + assert_eq!(block1, finalized); + + let finalized = state.finalize().inner_block(); + assert_eq!(block2, finalized); assert!(state.best_chain().is_none()); diff --git a/zebra-state/src/service/pending_utxos.rs b/zebra-state/src/service/pending_utxos.rs index 953aba4a97c..c60719825f9 100644 --- a/zebra-state/src/service/pending_utxos.rs +++ b/zebra-state/src/service/pending_utxos.rs @@ -60,14 
+60,6 @@ impl PendingUtxos { } } - /// Check the list of pending UTXO requests against the supplied [`transparent::Utxo`] index. - #[inline] - pub fn check_against(&mut self, utxos: &HashMap<transparent::OutPoint, transparent::Utxo>) { - for (outpoint, utxo) in utxos.iter() { - self.respond(outpoint, utxo.clone()) - } - } - /// Scan the set of waiting utxo requests for channels where all receivers /// have been dropped and remove the corresponding sender. pub fn prune(&mut self) { diff --git a/zebra-state/src/service/queued_blocks.rs b/zebra-state/src/service/queued_blocks.rs index 7343208570c..dabd36082ca 100644 --- a/zebra-state/src/service/queued_blocks.rs +++ b/zebra-state/src/service/queued_blocks.rs @@ -10,20 +10,20 @@ use tracing::instrument; use zebra_chain::{block, transparent}; -use crate::{BoxError, FinalizedBlock, PreparedBlock}; +use crate::{BoxError, CheckpointVerifiedBlock, SemanticallyVerifiedBlock}; #[cfg(test)] mod tests; -/// A queued finalized block, and its corresponding [`Result`] channel. -pub type QueuedFinalized = ( - FinalizedBlock, +/// A queued checkpoint verified block, and its corresponding [`Result`] channel. +pub type QueuedCheckpointVerified = ( + CheckpointVerifiedBlock, oneshot::Sender<Result<block::Hash, BoxError>>, ); -/// A queued non-finalized block, and its corresponding [`Result`] channel. -pub type QueuedNonFinalized = ( - PreparedBlock, +/// A queued semantically verified block, and its corresponding [`Result`] channel. +pub type QueuedSemanticallyVerified = ( + SemanticallyVerifiedBlock, oneshot::Sender<Result<block::Hash, BoxError>>, ); @@ -31,7 +31,7 @@ pub type QueuedNonFinalized = ( #[derive(Debug, Default)] pub struct QueuedBlocks { /// Blocks awaiting their parent blocks for contextual verification. - blocks: HashMap<block::Hash, QueuedNonFinalized>, + blocks: HashMap<block::Hash, QueuedSemanticallyVerified>, /// Hashes from `queued_blocks`, indexed by parent hash. by_parent: HashMap<block::Hash, HashSet<block::Hash>>, /// Hashes from `queued_blocks`, indexed by block height. @@ -47,7 +47,7 @@ impl QueuedBlocks { /// /// - if a block with the same `block::Hash` has already been queued. #[instrument(skip(self), fields(height = ?new.0.height, hash = %new.0.hash))] - pub fn queue(&mut self, new: QueuedNonFinalized) { + pub fn queue(&mut self, new: QueuedSemanticallyVerified) { let new_hash = new.0.hash; let new_height = new.0.height; let parent_hash = new.0.block.header.previous_block_hash; @@ -86,7 +86,10 @@ impl QueuedBlocks { /// Dequeue and return all blocks that were waiting for the arrival of /// `parent`. #[instrument(skip(self), fields(%parent_hash))] - pub fn dequeue_children(&mut self, parent_hash: block::Hash) -> Vec<QueuedNonFinalized> { + pub fn dequeue_children( + &mut self, + parent_hash: block::Hash, + ) -> Vec<QueuedSemanticallyVerified> { let queued_children = self .by_parent .remove(&parent_hash) @@ -176,7 +179,7 @@ impl QueuedBlocks { } /// Return the queued block if it has already been registered - pub fn get_mut(&mut self, hash: &block::Hash) -> Option<&mut QueuedNonFinalized> { + pub fn get_mut(&mut self, hash: &block::Hash) -> Option<&mut QueuedSemanticallyVerified> { self.blocks.get_mut(hash) } @@ -208,7 +211,7 @@ impl QueuedBlocks { /// Returns all key-value pairs of blocks as an iterator. /// /// Doesn't update the metrics, because it is only used when the state is being dropped. 
- pub fn drain(&mut self) -> Drain<'_, block::Hash, QueuedNonFinalized> { + pub fn drain(&mut self) -> Drain<'_, block::Hash, QueuedSemanticallyVerified> { self.known_utxos.clear(); self.known_utxos.shrink_to_fit(); self.by_parent.clear(); @@ -242,7 +245,7 @@ impl SentHashes { /// /// Assumes that blocks are added in the order of their height between `finish_batch` calls /// for efficient pruning. - pub fn add(&mut self, block: &PreparedBlock) { + pub fn add(&mut self, block: &SemanticallyVerifiedBlock) { // Track known UTXOs in sent blocks. let outpoints = block .new_outputs @@ -261,23 +264,24 @@ impl SentHashes { self.update_metrics_for_block(block.height); } - /// Stores the finalized `block`'s hash, height, and UTXOs, so they can be used to check if a + /// Stores the checkpoint verified `block`'s hash, height, and UTXOs, so they can be used to check if a /// block or UTXO is available in the state. /// - /// Used for finalized blocks close to the final checkpoint, so non-finalized blocks can look up + /// Used for checkpoint verified blocks close to the final checkpoint, so the semantic block verifier can look up /// their UTXOs. /// /// Assumes that blocks are added in the order of their height between `finish_batch` calls /// for efficient pruning. /// /// For more details see `add()`. - pub fn add_finalized(&mut self, block: &FinalizedBlock) { + pub fn add_finalized(&mut self, block: &CheckpointVerifiedBlock) { // Track known UTXOs in sent blocks. let outpoints = block .new_outputs .iter() - .map(|(outpoint, utxo)| { - self.known_utxos.insert(*outpoint, utxo.clone()); + .map(|(outpoint, ordered_utxo)| { + self.known_utxos + .insert(*outpoint, ordered_utxo.utxo.clone()); outpoint }) .cloned() diff --git a/zebra-state/src/service/queued_blocks/tests/vectors.rs b/zebra-state/src/service/queued_blocks/tests/vectors.rs index bd8dcbeb8e2..203caf706e6 100644 --- a/zebra-state/src/service/queued_blocks/tests/vectors.rs +++ b/zebra-state/src/service/queued_blocks/tests/vectors.rs @@ -9,17 +9,17 @@ use zebra_test::prelude::*; use crate::{ arbitrary::Prepare, - service::queued_blocks::{QueuedBlocks, QueuedNonFinalized}, + service::queued_blocks::{QueuedBlocks, QueuedSemanticallyVerified}, tests::FakeChainHelper, }; // Quick helper trait for making queued blocks with throw away channels trait IntoQueued { - fn into_queued(self) -> QueuedNonFinalized; + fn into_queued(self) -> QueuedSemanticallyVerified; } impl IntoQueued for Arc { - fn into_queued(self) -> QueuedNonFinalized { + fn into_queued(self) -> QueuedSemanticallyVerified { let (rsp_tx, _) = oneshot::channel(); (self.prepare(), rsp_tx) } diff --git a/zebra-state/src/service/read/address/tx_id.rs b/zebra-state/src/service/read/address/tx_id.rs index 560f7b101c4..27b9a9b39dc 100644 --- a/zebra-state/src/service/read/address/tx_id.rs +++ b/zebra-state/src/service/read/address/tx_id.rs @@ -276,8 +276,5 @@ fn apply_tx_id_changes( ) -> BTreeMap { // Correctness: compensate for inconsistent tx IDs finalized blocks across multiple addresses, // by combining them with overlapping non-finalized block tx IDs. 
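Note: the cleanup in the next hunk (dropping a redundant `.into_iter()` flagged by clippy) preserves the merge semantics this comment relies on: when chained key-value pairs are collected into a map, later inserts win, so non-finalized entries override finalized ones. A minimal model with a hypothetical `merge_maps` helper:

```rust
use std::collections::BTreeMap;

/// Illustration only: entries from `non_finalized` overwrite entries from
/// `finalized` that share a key, because `collect` inserts them in order.
fn merge_maps<K: Ord, V>(
    finalized: BTreeMap<K, V>,
    non_finalized: BTreeMap<K, V>,
) -> BTreeMap<K, V> {
    finalized.into_iter().chain(non_finalized).collect()
}
```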
- finalized_tx_ids - .into_iter() - .chain(chain_tx_ids.into_iter()) - .collect() + finalized_tx_ids.into_iter().chain(chain_tx_ids).collect() } diff --git a/zebra-state/src/service/read/address/utxo.rs b/zebra-state/src/service/read/address/utxo.rs index 7ee5cb4f110..30bcad2c555 100644 --- a/zebra-state/src/service/read/address/utxo.rs +++ b/zebra-state/src/service/read/address/utxo.rs @@ -370,7 +370,7 @@ fn apply_utxo_changes( // to compensate for overlapping finalized and non-finalized blocks. finalized_utxos .into_iter() - .chain(created_chain_utxos.into_iter()) + .chain(created_chain_utxos) .filter(|(utxo_location, _output)| !spent_chain_utxos.contains(utxo_location)) .collect() } diff --git a/zebra-state/src/service/read/tree.rs b/zebra-state/src/service/read/tree.rs index 704637d3bff..9f05f1d25d4 100644 --- a/zebra-state/src/service/read/tree.rs +++ b/zebra-state/src/service/read/tree.rs @@ -38,7 +38,7 @@ where // in memory, but `db` stores blocks on disk, with a memory cache.) chain .and_then(|chain| chain.as_ref().sapling_tree(hash_or_height)) - .or_else(|| db.sapling_tree(hash_or_height)) + .or_else(|| db.sapling_tree_by_hash_or_height(hash_or_height)) } /// Returns the Orchard @@ -59,7 +59,7 @@ where // in memory, but `db` stores blocks on disk, with a memory cache.) chain .and_then(|chain| chain.as_ref().orchard_tree(hash_or_height)) - .or_else(|| db.orchard_tree(hash_or_height)) + .or_else(|| db.orchard_tree_by_hash_or_height(hash_or_height)) } #[cfg(feature = "getblocktemplate-rpcs")] diff --git a/zebra-state/src/service/tests.rs b/zebra-state/src/service/tests.rs index 4723e9b6856..b7e55b9a9d9 100644 --- a/zebra-state/src/service/tests.rs +++ b/zebra-state/src/service/tests.rs @@ -23,7 +23,7 @@ use crate::{ init_test, service::{arbitrary::populated_state, chain_tip::TipAction, StateService}, tests::setup::{partial_nu5_chain_strategy, transaction_v4_from_coinbase}, - BoxError, Config, FinalizedBlock, PreparedBlock, Request, Response, + BoxError, CheckpointVerifiedBlock, Config, Request, Response, SemanticallyVerifiedBlock, }; const LAST_BLOCK_HEIGHT: u32 = 10; @@ -216,7 +216,7 @@ async fn empty_state_still_responds_to_requests() -> Result<()> { zebra_test::vectors::BLOCK_MAINNET_419200_BYTES.zcash_deserialize_into::>()?; let iter = vec![ - // No checks for CommitBlock or CommitFinalizedBlock because empty state + // No checks for SemanticallyVerifiedBlock or CommitCheckpointVerifiedBlock because empty state // precondition doesn't matter to them (Request::Depth(block.hash()), Ok(Response::Depth(None))), (Request::Tip, Ok(Response::Tip(None))), @@ -419,12 +419,12 @@ proptest! { // the genesis block has a zero-valued transparent output, // which is not included in the UTXO set if block.height > block::Height(0) { - let utxos = &block.new_outputs; + let utxos = &block.new_outputs.iter().map(|(k, ordered_utxo)| (*k, ordered_utxo.utxo.clone())).collect(); let block_value_pool = &block.block.chain_value_pool_change(utxos)?; expected_finalized_value_pool += *block_value_pool; } - let result_receiver = state_service.queue_and_commit_finalized(block.clone()); + let result_receiver = state_service.queue_and_commit_to_finalized_state(block.clone()); let result = result_receiver.blocking_recv(); prop_assert!(result.is_ok(), "unexpected failed finalized block commit: {:?}", result); @@ -450,7 +450,7 @@ proptest! 
{ let block_value_pool = &block.block.chain_value_pool_change(&transparent::utxos_from_ordered_utxos(utxos))?; expected_non_finalized_value_pool += *block_value_pool; - let result_receiver = state_service.queue_and_commit_non_finalized(block.clone()); + let result_receiver = state_service.queue_and_commit_to_non_finalized_state(block.clone()); let result = result_receiver.blocking_recv(); prop_assert!(result.is_ok(), "unexpected failed non-finalized block commit: {:?}", result); @@ -509,7 +509,7 @@ proptest! { TipAction::grow_with(expected_block.clone().into()) }; - let result_receiver = state_service.queue_and_commit_finalized(block); + let result_receiver = state_service.queue_and_commit_to_finalized_state(block); let result = result_receiver.blocking_recv(); prop_assert!(result.is_ok(), "unexpected failed finalized block commit: {:?}", result); @@ -532,7 +532,7 @@ proptest! { TipAction::grow_with(expected_block.clone().into()) }; - let result_receiver = state_service.queue_and_commit_non_finalized(block); + let result_receiver = state_service.queue_and_commit_to_non_finalized_state(block); let result = result_receiver.blocking_recv(); prop_assert!(result.is_ok(), "unexpected failed non-finalized block commit: {:?}", result); @@ -555,8 +555,8 @@ proptest! { fn continuous_empty_blocks_from_test_vectors() -> impl Strategy< Value = ( Network, - SummaryDebug>, - SummaryDebug>, + SummaryDebug>, + SummaryDebug>, ), > { any::() @@ -567,7 +567,7 @@ fn continuous_empty_blocks_from_test_vectors() -> impl Strategy< Network::Testnet => &*zebra_test::vectors::CONTINUOUS_TESTNET_BLOCKS, }; - // Transform the test vector's block bytes into a vector of `PreparedBlock`s. + // Transform the test vector's block bytes into a vector of `SemanticallyVerifiedBlock`s. let blocks: Vec<_> = raw_blocks .iter() .map(|(_height, &block_bytes)| { @@ -591,7 +591,7 @@ fn continuous_empty_blocks_from_test_vectors() -> impl Strategy< let non_finalized_blocks = blocks.split_off(finalized_blocks_count); let finalized_blocks: Vec<_> = blocks .into_iter() - .map(|prepared_block| FinalizedBlock::from(prepared_block.block)) + .map(|prepared_block| CheckpointVerifiedBlock::from(prepared_block.block)) .collect(); ( diff --git a/zebra-state/src/service/write.rs b/zebra-state/src/service/write.rs index ab7b466ce8a..cb36b6a2ba5 100644 --- a/zebra-state/src/service/write.rs +++ b/zebra-state/src/service/write.rs @@ -17,10 +17,10 @@ use crate::{ check, finalized_state::{FinalizedState, ZebraDb}, non_finalized_state::NonFinalizedState, - queued_blocks::{QueuedFinalized, QueuedNonFinalized}, + queued_blocks::{QueuedCheckpointVerified, QueuedSemanticallyVerified}, BoxError, ChainTipBlock, ChainTipSender, CloneError, }, - CommitBlockError, PreparedBlock, + CommitSemanticallyVerifiedError, SemanticallyVerifiedBlock, }; // These types are used in doc links @@ -49,8 +49,8 @@ const PARENT_ERROR_MAP_LIMIT: usize = MAX_BLOCK_REORG_HEIGHT as usize * 2; pub(crate) fn validate_and_commit_non_finalized( finalized_state: &ZebraDb, non_finalized_state: &mut NonFinalizedState, - prepared: PreparedBlock, -) -> Result<(), CommitBlockError> { + prepared: SemanticallyVerifiedBlock, +) -> Result<(), CommitSemanticallyVerifiedError> { check::initial_contextual_validity(finalized_state, non_finalized_state, &prepared)?; let parent_hash = prepared.block.header.previous_block_hash; @@ -131,8 +131,8 @@ fn update_latest_chain_channels( ) )] pub fn write_blocks_from_channels( - mut finalized_block_write_receiver: UnboundedReceiver, - mut 
non_finalized_block_write_receiver: UnboundedReceiver, + mut finalized_block_write_receiver: UnboundedReceiver, + mut non_finalized_block_write_receiver: UnboundedReceiver, mut finalized_state: FinalizedState, mut non_finalized_state: NonFinalizedState, invalid_block_reset_sender: UnboundedSender, @@ -140,6 +140,7 @@ pub fn write_blocks_from_channels( non_finalized_state_sender: watch::Sender, ) { let mut last_zebra_mined_log_height = None; + let mut prev_finalized_note_commitment_trees = None; // Write all the finalized blocks sent by the state, // until the state closes the finalized block channel's sender. @@ -178,9 +179,12 @@ pub fn write_blocks_from_channels( } // Try committing the block - match finalized_state.commit_finalized(ordered_block) { - Ok(finalized) => { + match finalized_state + .commit_finalized(ordered_block, prev_finalized_note_commitment_trees.take()) + { + Ok((finalized, note_commitment_trees)) => { let tip_block = ChainTipBlock::from(finalized); + prev_finalized_note_commitment_trees = Some(note_commitment_trees); log_if_mined_by_zebra(&tip_block, &mut last_zebra_mined_log_height); @@ -288,12 +292,12 @@ pub fn write_blocks_from_channels( while non_finalized_state.best_chain_len() > MAX_BLOCK_REORG_HEIGHT { tracing::trace!("finalizing block past the reorg limit"); - let finalized_with_trees = non_finalized_state.finalize(); - finalized_state - .commit_finalized_direct(finalized_with_trees, "best non-finalized chain root") + let contextually_verified_with_trees = non_finalized_state.finalize(); + prev_finalized_note_commitment_trees = finalized_state + .commit_finalized_direct(contextually_verified_with_trees, prev_finalized_note_commitment_trees.take(), "commit contextually-verified request") .expect( "unexpected finalized block commit error: note commitment and history trees were already checked by the non-finalized state", - ); + ).1.into(); } // Update the metrics if semantic and contextual validation passes diff --git a/zebra-state/src/tests.rs b/zebra-state/src/tests.rs index 1133542df1a..488ab4227bd 100644 --- a/zebra-state/src/tests.rs +++ b/zebra-state/src/tests.rs @@ -41,7 +41,7 @@ impl FakeChainHelper for Arc { _ => panic!("block must have a coinbase height to create a child"), } - child.transactions.push(tx); + child.transactions.insert(0, tx); Arc::make_mut(&mut child.header).previous_block_hash = parent_hash; Arc::new(child) diff --git a/zebra-state/src/tests/setup.rs b/zebra-state/src/tests/setup.rs index e84ef6b975d..296ee10a0e1 100644 --- a/zebra-state/src/tests/setup.rs +++ b/zebra-state/src/tests/setup.rs @@ -18,7 +18,7 @@ use crate::{ service::{ check, finalized_state::FinalizedState, non_finalized_state::NonFinalizedState, read, }, - Config, FinalizedBlock, + CheckpointVerifiedBlock, Config, }; /// Generate a chain that allows us to make tests for the legacy chain rules. @@ -83,8 +83,8 @@ pub(crate) fn partial_nu5_chain_strategy( /// Return a new `StateService` containing the mainnet genesis block. /// Also returns the finalized genesis block itself. 
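
The `write.rs` changes above thread a `prev_finalized_note_commitment_trees` cache from one `commit_finalized` call into the next, so consecutive finalized blocks don't reload the trees from the database. A minimal sketch of that handoff, with hypothetical stand-in types (the real code returns the committed block alongside its trees):

```rust
struct Trees; // stand-in for the real note commitment trees
struct Block; // stand-in for a checkpoint-verified block

fn commit(_block: Block, cached: Option<Trees>) -> Result<(u32, Trees), ()> {
    // Reuse the cached trees when present, otherwise (re)load them from storage.
    let trees = cached.unwrap_or(Trees);
    // ... update the trees with the block's notes and write the block ...
    Ok((0, trees))
}

fn main() {
    let mut prev_trees: Option<Trees> = None;
    for block in [Block, Block, Block] {
        // `take()` moves the cache out and leaves `None` behind, so an error or
        // reset path simply falls back to reloading on the next commit.
        if let Ok((_height, trees)) = commit(block, prev_trees.take()) {
            prev_trees = Some(trees);
        }
    }
}
```
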
-pub(crate) fn new_state_with_mainnet_genesis() -> (FinalizedState, NonFinalizedState, FinalizedBlock) -{ +pub(crate) fn new_state_with_mainnet_genesis( +) -> (FinalizedState, NonFinalizedState, CheckpointVerifiedBlock) { let genesis = zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES .zcash_deserialize_into::>() .expect("block should deserialize"); @@ -105,9 +105,9 @@ pub(crate) fn new_state_with_mainnet_genesis() -> (FinalizedState, NonFinalizedS read::best_tip(&non_finalized_state, &finalized_state.db) ); - let genesis = FinalizedBlock::from(genesis); + let genesis = CheckpointVerifiedBlock::from(genesis); finalized_state - .commit_finalized_direct(genesis.clone().into(), "test") + .commit_finalized_direct(genesis.clone().into(), None, "test") .expect("unexpected invalid genesis block test vector"); assert_eq!( diff --git a/zebra-state/tests/basic.rs b/zebra-state/tests/basic.rs index 9aebfeb775e..638ab0f1a41 100644 --- a/zebra-state/tests/basic.rs +++ b/zebra-state/tests/basic.rs @@ -25,7 +25,7 @@ static COMMIT_FINALIZED_BLOCK_MAINNET: Lazy< let hash = block.hash(); vec![ ( - Request::CommitFinalizedBlock(block.into()), + Request::CommitCheckpointVerifiedBlock(block.into()), Ok(Response::Committed(hash)), ), ( @@ -46,7 +46,7 @@ static COMMIT_FINALIZED_BLOCK_TESTNET: Lazy< let hash = block.hash(); vec![ ( - Request::CommitFinalizedBlock(block.into()), + Request::CommitCheckpointVerifiedBlock(block.into()), Ok(Response::Committed(hash)), ), ( diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index f5dd2e7a1b4..7544d4793cf 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,23 +1,30 @@ [package] name = "zebra-test" -version = "1.0.0-beta.24" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] +description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["command-line-utilities", "cryptography::cryptocurrencies"] [dependencies] hex = "0.4.3" -indexmap = "1.9.3" +indexmap = "2.0.0" lazy_static = "1.4.0" -insta = "1.29.0" -proptest = "1.1.0" -once_cell = "1.17.1" -rand = { version = "0.8.5", package = "rand" } -regex = "1.8.1" +insta = "1.31.0" +proptest = "1.2.0" +once_cell = "1.18.0" +rand = "0.8.5" +regex = "1.9.3" -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } futures = "0.3.28" @@ -29,11 +36,11 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } humantime = "2.1.0" owo-colors = "3.5.0" spandoc = "0.2.2" -thiserror = "1.0.40" +thiserror = "1.0.44" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-error = "0.2.0" tracing = "0.1.37" [dev-dependencies] -tempfile = "3.5.0" +tempfile = "3.7.1" diff --git a/zebra-test/src/command.rs b/zebra-test/src/command.rs index dae47defba3..75f45fc6d65 100644 --- a/zebra-test/src/command.rs +++ b/zebra-test/src/command.rs @@ -540,7 +540,9 @@ impl TestChild { // Read unread child output. // // This checks for failure logs, and prevents some test hangs and deadlocks. 
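
The hangs and deadlocks mentioned here (and in the TODOs added below) are the classic pipe-buffer problem: a child that keeps writing blocks once the OS pipe fills, while a parent that waits before reading blocks forever. A hedged sketch of the underlying issue with plain `std::process`, assuming a Unix shell rather than the `TestChild` API:

```rust
use std::io::Read;
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    let mut child = Command::new("sh")
        .args(["-c", "yes hello | head -n 100000"]) // emits more than a pipe buffer
        .stdout(Stdio::piped())
        .spawn()?;

    // Drain stdout *before* waiting: calling `child.wait()` first could deadlock
    // once the pipe buffer fills and the child blocks on its next write.
    let mut output = String::new();
    child
        .stdout
        .take()
        .expect("stdout was piped")
        .read_to_string(&mut output)?;

    let status = child.wait()?;
    assert!(status.success());
    Ok(())
}
```
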
- if self.child.is_some() || self.stdout.is_some() { + // + // TODO: this could block if stderr is full and stdout is waiting for stderr to be read. + if self.stdout.is_some() { let wrote_lines = self.wait_for_stdout_line(format!("\n{} Child Stdout:", self.command_path)); @@ -552,7 +554,7 @@ impl TestChild { } } - if self.child.is_some() || self.stderr.is_some() { + if self.stderr.is_some() { let wrote_lines = self.wait_for_stderr_line(format!("\n{} Child Stderr:", self.command_path)); @@ -566,6 +568,56 @@ impl TestChild { kill_result } + /// Kill the process, and return all its remaining standard output and standard error output. + /// + /// If `ignore_exited` is `true`, log "can't kill an exited process" errors, + /// rather than returning them. + /// + /// Returns `Ok(output)`, or an error if the kill failed. + pub fn kill_and_return_output(&mut self, ignore_exited: bool) -> Result { + self.apply_failure_regexes_to_outputs(); + + // Prevent a hang when consuming output, + // by making sure the child's output actually finishes. + let kill_result = self.kill(ignore_exited); + + // Read unread child output. + let mut stdout_buf = String::new(); + let mut stderr_buf = String::new(); + + // This also checks for failure logs, and prevents some test hangs and deadlocks. + loop { + let mut remaining_output = false; + + if let Some(stdout) = self.stdout.as_mut() { + if let Some(line) = + Self::wait_and_return_output_line(stdout, self.bypass_test_capture) + { + stdout_buf.push_str(&line); + remaining_output = true; + } + } + + if let Some(stderr) = self.stderr.as_mut() { + if let Some(line) = + Self::wait_and_return_output_line(stderr, self.bypass_test_capture) + { + stderr_buf.push_str(&line); + remaining_output = true; + } + } + + if !remaining_output { + break; + } + } + + let mut output = stdout_buf; + output.push_str(&stderr_buf); + + kill_result.map(|()| output) + } + /// Waits until a line of standard output is available, then consumes it. /// /// If there is a line, and `write_context` is `Some`, writes the context to the test logs. @@ -632,15 +684,40 @@ impl TestChild { false } + /// Waits until a line of `output` is available, then returns it. + /// + /// If there is a line, and `write_context` is `Some`, writes the context to the test logs. + /// Always writes the line to the test logs. + /// + /// Returns `true` if a line was available, + /// or `false` if the standard output has finished. + #[allow(clippy::unwrap_in_result)] + fn wait_and_return_output_line( + mut output: impl Iterator>, + bypass_test_capture: bool, + ) -> Option { + if let Some(line_result) = output.next() { + let line_result = line_result.expect("failure reading test process logs"); + + Self::write_to_test_logs(&line_result, bypass_test_capture); + + return Some(line_result); + } + + None + } + /// Waits for the child process to exit, then returns its output. /// + /// # Correctness + /// /// The other test child output methods take one or both outputs, /// making them unavailable to this method. /// /// Ignores any configured timeouts. /// - /// Returns an error if the child has already been taken, - /// or both outputs have already been taken. + /// Returns an error if the child has already been taken. + /// TODO: return an error if both outputs have already been taken. #[spandoc::spandoc] pub fn wait_with_output(mut self) -> Result> { let child = match self.child.take() { @@ -708,6 +785,8 @@ impl TestChild { /// /// Kills the child on error, or after the configured timeout has elapsed. 
/// See [`Self::expect_line_matching_regex_set`] for details. + // + // TODO: these methods could block if stderr is full and stdout is waiting for stderr to be read #[instrument(skip(self))] #[allow(clippy::unwrap_in_result)] pub fn expect_stdout_line_matches(&mut self, success_regex: R) -> Result @@ -1293,6 +1372,11 @@ impl TestOutput { fn was_killed(&self) -> bool { self.output.status.signal() == Some(9) } + + /// Takes the generic `dir` parameter out of this `TestOutput`. + pub fn take_dir(&mut self) -> Option { + self.dir.take() + } } /// Add context to an error report diff --git a/zebra-test/src/mock_service.rs b/zebra-test/src/mock_service.rs index 21debf97c13..25f379034e6 100644 --- a/zebra-test/src/mock_service.rs +++ b/zebra-test/src/mock_service.rs @@ -146,6 +146,7 @@ pub struct ResponseSender { impl Service for MockService where + Request: Send + 'static, Response: Send + 'static, Error: Send + 'static, { @@ -740,7 +741,10 @@ impl ResponseSender { /// This method takes ownership of the [`ResponseSender`] so that only one response can be /// sent. /// - /// If `respond` or `respond_with` are not called, the caller will panic. + /// # Panics + /// + /// If one of the `respond*` methods isn't called, the [`MockService`] might panic with a + /// timeout error. /// /// # Example /// @@ -748,6 +752,9 @@ impl ResponseSender { /// # use zebra_test::mock_service::MockService; /// # use tower::{Service, ServiceExt}; /// # + /// # #[derive(Debug, PartialEq, Eq)] + /// # struct Request; + /// # /// # let reactor = tokio::runtime::Builder::new_current_thread() /// # .enable_all() /// # .build() @@ -760,19 +767,19 @@ impl ResponseSender { /// /// # let mut service = mock_service.clone(); /// # let task = tokio::spawn(async move { - /// # let first_call_result = (&mut service).oneshot(1).await; - /// # let second_call_result = service.oneshot(1).await; + /// # let first_call_result = (&mut service).oneshot(Request).await; + /// # let second_call_result = service.oneshot(Request).await; /// # /// # (first_call_result, second_call_result) /// # }); /// # /// mock_service - /// .expect_request(1) + /// .expect_request(Request) /// .await - /// .respond("Received one".to_owned()); + /// .respond("Received Request".to_owned()); /// /// mock_service - /// .expect_request(1) + /// .expect_request(Request) /// .await /// .respond(Err("Duplicate request")); /// # }); @@ -789,7 +796,10 @@ impl ResponseSender { /// This method takes ownership of the [`ResponseSender`] so that only one response can be /// sent. /// - /// If `respond` or `respond_with` are not called, the caller will panic. + /// # Panics + /// + /// If one of the `respond*` methods isn't called, the [`MockService`] might panic with a + /// timeout error. 
/// /// # Example /// @@ -797,6 +807,9 @@ impl ResponseSender { /// # use zebra_test::mock_service::MockService; /// # use tower::{Service, ServiceExt}; /// # + /// # #[derive(Debug, PartialEq, Eq)] + /// # struct Request; + /// # /// # let reactor = tokio::runtime::Builder::new_current_thread() /// # .enable_all() /// # .build() @@ -809,21 +822,21 @@ impl ResponseSender { /// /// # let mut service = mock_service.clone(); /// # let task = tokio::spawn(async move { - /// # let first_call_result = (&mut service).oneshot(1).await; - /// # let second_call_result = service.oneshot(1).await; + /// # let first_call_result = (&mut service).oneshot(Request).await; + /// # let second_call_result = service.oneshot(Request).await; /// # /// # (first_call_result, second_call_result) /// # }); /// # /// mock_service - /// .expect_request(1) + /// .expect_request(Request) /// .await - /// .respond_with(|req| format!("Received: {}", req)); + /// .respond_with(|req| format!("Received: {req:?}")); /// /// mock_service - /// .expect_request(1) + /// .expect_request(Request) /// .await - /// .respond_with(|req| Err(format!("Duplicate request: {}", req))); + /// .respond_with(|req| Err(format!("Duplicate request: {req:?}"))); /// # }); /// ``` pub fn respond_with(self, response_fn: F) @@ -834,6 +847,116 @@ impl ResponseSender { let response_result = response_fn(self.request()).into_result(); let _ = self.response_sender.send(response_result); } + + /// Respond to the request using a fixed error value. + /// + /// The `error` must be the `Error` type. This helps avoid type resolution issues in the + /// compiler. + /// + /// This method takes ownership of the [`ResponseSender`] so that only one response can be + /// sent. + /// + /// # Panics + /// + /// If one of the `respond*` methods isn't called, the [`MockService`] might panic with a + /// timeout error. + /// + /// # Example + /// + /// ``` + /// # use zebra_test::mock_service::MockService; + /// # use tower::{Service, ServiceExt}; + /// # + /// # #[derive(Debug, PartialEq, Eq)] + /// # struct Request; + /// # struct Response; + /// # + /// # let reactor = tokio::runtime::Builder::new_current_thread() + /// # .enable_all() + /// # .build() + /// # .expect("Failed to build Tokio runtime"); + /// # + /// # reactor.block_on(async { + /// // Mock a service with a `String` as the service `Error` type. + /// let mut mock_service: MockService = + /// MockService::build().for_unit_tests(); + /// + /// # let mut service = mock_service.clone(); + /// # let task = tokio::spawn(async move { + /// # let first_call_result = (&mut service).oneshot(Request).await; + /// # let second_call_result = service.oneshot(Request).await; + /// # + /// # (first_call_result, second_call_result) + /// # }); + /// # + /// mock_service + /// .expect_request(Request) + /// .await + /// .respond_error("Duplicate request".to_string()); + /// # }); + /// ``` + pub fn respond_error(self, error: Error) { + // TODO: impl ResponseResult for BoxError/Error trait when overlapping impls are + // better supported by the compiler + let _ = self.response_sender.send(Err(error)); + } + + /// Respond to the request by calculating an error from the request. + /// + /// The `error` must be the `Error` type. This helps avoid type resolution issues in the + /// compiler. + /// + /// This method takes ownership of the [`ResponseSender`] so that only one response can be + /// sent. 
+ /// + /// # Panics + /// + /// If one of the `respond*` methods isn't called, the [`MockService`] might panic with a + /// timeout error. + /// + /// # Example + /// + /// ``` + /// # use zebra_test::mock_service::MockService; + /// # use tower::{Service, ServiceExt}; + /// # + /// # #[derive(Debug, PartialEq, Eq)] + /// # struct Request; + /// # struct Response; + /// # + /// # let reactor = tokio::runtime::Builder::new_current_thread() + /// # .enable_all() + /// # .build() + /// # .expect("Failed to build Tokio runtime"); + /// # + /// # reactor.block_on(async { + /// // Mock a service with a `String` as the service `Error` type. + /// let mut mock_service: MockService = + /// MockService::build().for_unit_tests(); + /// + /// # let mut service = mock_service.clone(); + /// # let task = tokio::spawn(async move { + /// # let first_call_result = (&mut service).oneshot(Request).await; + /// # let second_call_result = service.oneshot(Request).await; + /// # + /// # (first_call_result, second_call_result) + /// # }); + /// # + /// mock_service + /// .expect_request(Request) + /// .await + /// .respond_with_error(|req| format!("Duplicate request: {req:?}")); + /// # }); + /// ``` + pub fn respond_with_error(self, response_fn: F) + where + F: FnOnce(&Request) -> Error, + { + // TODO: impl ResponseResult for BoxError/Error trait when overlapping impls are + // better supported by the compiler + let response_result = Err(response_fn(self.request())); + let _ = self.response_sender.send(response_result); + } } /// A representation of an assertion type. diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 46ddc49d752..0befe3849fd 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,10 +1,24 @@ [package] name = "zebra-utils" +version = "1.0.0-beta.28" authors = ["Zcash Foundation "] +description = "Developer tools for Zebra maintenance and testing" license = "MIT OR Apache-2.0" -version = "1.0.0-beta.24" +repository = "https://github.com/ZcashFoundation/zebra" edition = "2021" +# zebra-utils has a separate README file +readme = "README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["command-line-utilities", "cryptography::cryptocurrencies"] + +# Zebra is only supported on the latest stable Rust version. See the README for details. +# Any Zebra release can break compatibility with older Rust versions. 
+rust-version = "1.70" + [[bin]] name = "zebra-checkpoints" # this setting is required for Zebra's Docker build caches @@ -55,23 +69,24 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } structopt = "0.3.26" hex = "0.4.3" -serde_json = "1.0.96" +serde_json = "1.0.104" tracing-error = "0.2.0" tracing-subscriber = "0.3.17" -thiserror = "1.0.40" +thiserror = "1.0.44" + +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.28" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28" } -zebra-node-services = { path = "../zebra-node-services" } -zebra-chain = { path = "../zebra-chain" } +# These crates are needed for the block-template-to-proposal binary +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.28", optional = true } # These crates are needed for the zebra-checkpoints binary -itertools = { version = "0.10.5", optional = true } +itertools = { version = "0.11.0", optional = true } # These crates are needed for the search-issue-refs binary -regex = { version = "1.8.1", optional = true } -reqwest = { version = "0.11.18", optional = true } +regex = { version = "1.9.3", optional = true } +# Avoid default openssl dependency to reduce the dependency tree and security alerts. +reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"], optional = true } # These crates are needed for the zebra-checkpoints and search-issue-refs binaries -tokio = { version = "1.28.0", features = ["full"], optional = true } - -# These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", optional = true } +tokio = { version = "1.29.1", features = ["full"], optional = true } diff --git a/zebra-utils/src/bin/search-issue-refs/main.rs b/zebra-utils/src/bin/search-issue-refs/main.rs index 2af6a70769a..cc71ee198a0 100644 --- a/zebra-utils/src/bin/search-issue-refs/main.rs +++ b/zebra-utils/src/bin/search-issue-refs/main.rs @@ -288,11 +288,7 @@ to create a github token." let mut num_closed_issues = 0; while let Some(res) = github_api_requests.join_next().await { - let Ok(( - res, - id, - issue_refs, - )) = res else { + let Ok((res, id, issue_refs)) = res else { println!("warning: failed to join api request thread/task"); continue; }; diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 064848fff4c..27af2edd065 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -1,17 +1,25 @@ [package] # Crate metadata name = "zebrad" +version = "1.1.0" authors = ["Zcash Foundation "] +description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" -version = "1.0.0-rc.8" repository = "https://github.com/ZcashFoundation/zebra" +readme = "../README.md" +homepage = "https://zfnd.org/zebra/" +# crates.io is limited to 5 keywords and categories +keywords = ["zebra", "zcash"] +# Must be one of +categories = ["command-line-utilities", "cryptography::cryptocurrencies"] + # Settings that impact compilation edition = "2021" -# Zebra is only supported on the latest stable Rust version. Some earlier versions might work. -# Zebra's code uses features introduced in Rust 1.68, or any later stable release. -rust-version = "1.68" +# Zebra is only supported on the latest stable Rust version. See the README for details. +# Any Zebra release can break compatibility with older Rust versions. 
+rust-version = "1.70" # Settings that impact runtime behaviour @@ -19,10 +27,36 @@ rust-version = "1.68" # when run in the workspace directory default-run = "zebrad" +# `cargo release` settings +[package.metadata.release] +pre-release-replacements = [ + {file="../book/src/user/install.md", search="git checkout [a-z0-9\\.-]+", replace="git checkout v{{version}}"}, + {file="../book/src/user/install.md", search="--tag [a-z0-9\\.-]+", replace="--tag v{{version}}"}, + {file="../book/src/user/docker.md", search="--branch [a-z0-9\\.-]+", replace="--branch v{{version}}"}, +] + +[package.metadata.docs.rs] + +# Publish Zebra's supported production and developer features on docs.rs. +# (Except for the log level features, because there are a lot of them.) +# +# +features = [ + "default-release-binaries", + "filter-reload", + "flamegraph", + "journald", + "prometheus", + "sentry", +] + [features] # In release builds, don't compile debug logging code, to improve performance. default = ["release_max_level_info"] +# Default features for official ZF binary release builds +default-release-binaries = ["default", "sentry"] + # Production features that activate extra dependencies, or extra features in dependencies # Experimental mining RPC support @@ -108,34 +142,38 @@ test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { path = "../zebra-chain" } -zebra-consensus = { path = "../zebra-consensus" } -zebra-network = { path = "../zebra-network" } -zebra-node-services = { path = "../zebra-node-services" } -zebra-rpc = { path = "../zebra-rpc" } -zebra-state = { path = "../zebra-state" } - -abscissa_core = "0.5" -gumdrop = { version = "0.7", features = ["default_expr"]} -chrono = { version = "0.4.24", default-features = false, features = ["clock", "std"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.28" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.28" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.28" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.28" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.28" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.28" } + +# Required for crates.io publishing, but it's only used in tests +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.28", optional = true } + +abscissa_core = "0.7.0" +clap = { version = "4.3.21", features = ["cargo"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" -indexmap = "1.9.3" +indexmap = "2.0.0" lazy_static = "1.4.0" -serde = { version = "1.0.163", features = ["serde_derive"] } -toml = "0.7.4" +semver = "1.0.18" +serde = { version = "1.0.179", features = ["serde_derive"] } +toml = "0.7.6" futures = "0.3.28" rayon = "1.7.0" -tokio = { version = "1.28.0", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } +tokio = { version = "1.29.1", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } -pin-project = "1.1.0" +pin-project = "1.1.3" -color-eyre = { version = "0.6.2", default_features = false, features = ["issue-url"] } +color-eyre = { version = "0.6.2", default-features = false, features = ["issue-url"] } # This is a transitive dependency via color-eyre. # Enable a feature that makes tinyvec compile much faster. 
tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } -thiserror = "1.0.40" +thiserror = "1.0.44" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-appender = "0.2.2" @@ -143,16 +181,16 @@ tracing-error = "0.2.0" tracing-futures = "0.2.5" tracing = "0.1.37" -metrics = "0.21.0" +metrics = "0.21.1" dirs = "5.0.1" atty = "0.2.14" num-integer = "0.1.45" -rand = { version = "0.8.5", package = "rand" } +rand = "0.8.5" # prod feature sentry -sentry = { version = "0.31.0", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } +sentry = { version = "0.31.5", default-features = false, features = ["backtrace", "contexts", "reqwest", "rustls", "tracing"], optional = true } # prod feature flamegraph tracing-flame = { version = "0.2.0", optional = true } @@ -162,7 +200,7 @@ inferno = { version = "0.11.15", default-features = false, optional = true } tracing-journald = { version = "0.3.0", optional = true } # prod feature filter-reload -hyper = { version = "0.14.26", features = ["http1", "http2", "server"], optional = true } +hyper = { version = "0.14.27", features = ["http1", "http2", "server"], optional = true } # prod feature prometheus metrics-exporter-prometheus = { version = "0.12.0", default-features = false, features = ["http-listener"], optional = true } @@ -171,48 +209,47 @@ metrics-exporter-prometheus = { version = "0.12.0", default-features = false, fe # # zebrad uses tracing for logging, # we only use `log` to set and print the static log levels in transitive dependencies -log = "0.4.17" +log = "0.4.19" # prod feature progress-bar howudoin = { version = "0.1.2", features = ["term-line"], optional = true } -indicatif = { version = "0.17.3", optional = true } +indicatif = { version = "0.17.6", optional = true } # test feature proptest-impl -proptest = { version = "1.1.0", optional = true } +proptest = { version = "1.2.0", optional = true } proptest-derive = { version = "0.3.0", optional = true } # test feature tokio-console -console-subscriber = { version = "0.1.8", optional = true } +console-subscriber = { version = "0.1.10", optional = true } [build-dependencies] -vergen = { version = "8.1.3", default-features = false, features = ["cargo", "git", "git2", "rustc"] } +vergen = { version = "8.2.4", default-features = false, features = ["cargo", "git", "git2", "rustc"] } # test feature lightwalletd-grpc-tests tonic-build = { version = "0.9.2", optional = true } [dev-dependencies] -abscissa_core = { version = "0.5", features = ["testing"] } +abscissa_core = { version = "0.7.0", features = ["testing"] } hex = "0.4.3" jsonrpc-core = "18.0.0" -once_cell = "1.17.1" -regex = "1.8.1" -semver = "1.0.17" +once_cell = "1.18.0" +regex = "1.9.3" # zebra-rpc needs the preserve_order feature, it also makes test results more stable -serde_json = { version = "1.0.96", features = ["preserve_order"] } -tempfile = "3.5.0" +serde_json = { version = "1.0.104", features = ["preserve_order"] } +tempfile = "3.7.1" -hyper = { version = "0.14.26", features = ["http1", "http2", "server"]} +hyper = { version = "0.14.27", features = ["http1", "http2", "server"]} tracing-test = { version = "0.2.4", features = ["no-env-filter"] } -tokio = { version = "1.28.0", features = ["full", "tracing", "test-util"] } +tokio = { version = "1.29.1", features = ["full", "tracing", "test-util"] } tokio-stream = "0.1.14" # test feature lightwalletd-grpc-tests prost = "0.11.9" tonic = "0.9.2" -proptest = "1.1.0" +proptest = "1.2.0" proptest-derive = 
"0.3.0" # enable span traces and track caller in tests diff --git a/zebrad/README.md b/zebrad/README.md deleted file mode 100644 index 4dee5cbb2d0..00000000000 --- a/zebrad/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Zebrad - -Zebrad is an application. - -## Getting Started - -This application is authored using [Abscissa], a Rust application framework. - -For more information, see: - -[Documentation] - -[Abscissa]: https://github.com/iqlusioninc/abscissa -[Documentation]: https://docs.rs/abscissa_core/ diff --git a/zebrad/src/application.rs b/zebrad/src/application.rs index d5f3e9e81cd..133465ffa58 100644 --- a/zebrad/src/application.rs +++ b/zebrad/src/application.rs @@ -1,27 +1,27 @@ //! Zebrad Abscissa Application -use std::{fmt::Write as _, io::Write as _, process}; +use std::{env, fmt::Write as _, io::Write as _, process, sync::Arc}; use abscissa_core::{ application::{self, AppCell}, - config::{self, Configurable}, + config::CfgCell, status_err, terminal::{component::Terminal, stderr, stdout, ColorChoice}, - Application, Component, FrameworkError, Shutdown, StandardPaths, Version, + Application, Component, Configurable, FrameworkError, Shutdown, StandardPaths, }; +use semver::{BuildMetadata, Version}; use zebra_network::constants::PORT_IN_USE_ERROR; -use zebra_state::constants::{DATABASE_FORMAT_VERSION, LOCK_FILE_ERROR}; +use zebra_state::{ + constants::LOCK_FILE_ERROR, database_format_version_in_code, database_format_version_on_disk, +}; use crate::{ - commands::ZebradCmd, + commands::EntryPoint, components::{sync::end_of_support::EOS_PANIC_MESSAGE_HEADER, tracing::Tracing}, config::ZebradConfig, }; -mod entry_point; -use entry_point::EntryPoint; - /// See /// Print a fatal error message and exit fn fatal_error(app_name: String, err: &dyn std::error::Error) -> ! { @@ -32,78 +32,119 @@ fn fatal_error(app_name: String, err: &dyn std::error::Error) -> ! { /// Application state pub static APPLICATION: AppCell = AppCell::new(); -/// Obtain a read-only (multi-reader) lock on the application state. +/// Returns the `zebrad` version for this build, in SemVer 2.0 format. /// -/// Panics if the application state has not been initialized. -pub fn app_reader() -> application::lock::Reader { - APPLICATION.read() -} +/// Includes `git describe` build metatata if available: +/// - the number of commits since the last version tag, and +/// - the git commit. +/// +/// For details, see +pub fn build_version() -> Version { + // CARGO_PKG_VERSION is always a valid SemVer 2.0 version. + const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); -/// Obtain an exclusive mutable lock on the application state. -pub fn app_writer() -> application::lock::Writer { - APPLICATION.write() -} + // We're using the same library as cargo uses internally, so this is guaranteed. + let fallback_version = CARGO_PKG_VERSION.parse().unwrap_or_else(|error| { + panic!( + "unexpected invalid CARGO_PKG_VERSION: {error:?} in {CARGO_PKG_VERSION:?}, \ + should have been checked by cargo" + ) + }); -/// Obtain a read-only (multi-reader) lock on the application configuration. -/// -/// Panics if the application configuration has not been loaded. -pub fn app_config() -> config::Reader { - config::Reader::new(&APPLICATION) + vergen_build_version().unwrap_or(fallback_version) } -/// Returns the zebrad version for this build, in SemVer 2.0 format. -/// -/// Includes the git commit and the number of commits since the last version -/// tag, if available. 
-/// -/// For details, see -pub fn app_version() -> Version { - const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); - let vergen_git_describe: Option<&str> = option_env!("VERGEN_GIT_DESCRIBE"); - - match vergen_git_describe { - // change the git describe format to the semver 2.0 format - Some(mut vergen_git_describe) if !vergen_git_describe.is_empty() => { - // strip the leading "v", if present - if &vergen_git_describe[0..1] == "v" { - vergen_git_describe = &vergen_git_describe[1..]; - } +/// Returns the `zebrad` version from this build, if available from `vergen`. +fn vergen_build_version() -> Option { + // VERGEN_GIT_DESCRIBE should be in the format: + // - v1.0.0-rc.9-6-g319b01bb84 + // - v1.0.0-6-g319b01bb84 + // but sometimes it is just a short commit hash. See #6879 for details. + // + // Currently it is the output of `git describe --tags --dirty --match='v*.*.*'`, + // or whatever is specified in zebrad/build.rs. + const VERGEN_GIT_DESCRIBE: Option<&str> = option_env!("VERGEN_GIT_DESCRIBE"); + + // The SemVer 2.0 format is: + // - 1.0.0-rc.9+6.g319b01bb84 + // - 1.0.0+6.g319b01bb84 + // + // Or as a pattern: + // - version: major`.`minor`.`patch + // - optional pre-release: `-`tag[`.`tag ...] + // - optional build: `+`tag[`.`tag ...] + // change the git describe format to the semver 2.0 format + let Some(vergen_git_describe) = VERGEN_GIT_DESCRIBE else { + return None; + }; + + // `git describe` uses "dirty" for uncommitted changes, + // but users won't understand what that means. + let vergen_git_describe = vergen_git_describe.replace("dirty", "modified"); + + // Split using "git describe" separators. + let mut vergen_git_describe = vergen_git_describe.split('-').peekable(); + + // Check the "version core" part. + let version = vergen_git_describe.next(); + let Some(mut version) = version else { + return None; + }; + + // strip the leading "v", if present. + version = version.strip_prefix('v').unwrap_or(version); + + // If the initial version is empty, just a commit hash, or otherwise invalid. + if Version::parse(version).is_err() { + return None; + } - // split into tag, commit count, hash - let rparts: Vec<_> = vergen_git_describe.rsplitn(3, '-').collect(); - - match rparts.as_slice() { - // assume it's a cargo package version or a git tag with no hash - [_] | [_, _] => vergen_git_describe.parse().unwrap_or_else(|_| { - panic!( - "VERGEN_GIT_DESCRIBE without a hash {vergen_git_describe:?} must be valid semver 2.0" - ) - }), - - // it's the "git describe" format, which doesn't quite match SemVer 2.0 - [hash, commit_count, tag] => { - let semver_fix = format!("{tag}+{commit_count}.{hash}"); - semver_fix.parse().unwrap_or_else(|_| - panic!("Modified VERGEN_GIT_DESCRIBE {vergen_git_describe:?} -> {rparts:?} -> {semver_fix:?} must be valid. Note: CARGO_PKG_VERSION was {CARGO_PKG_VERSION:?}.")) - } + let mut semver = version.to_string(); - _ => unreachable!("split is limited to 3 parts"), - } - } - _ => CARGO_PKG_VERSION.parse().unwrap_or_else(|_| { - panic!("CARGO_PKG_VERSION {CARGO_PKG_VERSION:?} must be valid semver 2.0") - }), + // Check if the next part is a pre-release or build part, + // but only consume it if it is a pre-release tag. + let Some(part) = vergen_git_describe.peek() else { + // No pre-release or build. + return semver.parse().ok(); + }; + + if part.starts_with(char::is_alphabetic) { + // It's a pre-release tag. + semver.push('-'); + semver.push_str(part); + + // Consume the pre-release tag to move on to the build tags, if any. 
+ let _ = vergen_git_describe.next(); + } + + // Check if the next part is a build part. + let Some(build) = vergen_git_describe.peek() else { + // No build tags. + return semver.parse().ok(); + }; + + if !build.starts_with(char::is_numeric) { + // It's not a valid "commit count" build tag from "git describe". + return None; } + + // Append the rest of the build parts with the correct `+` and `.` separators. + let build_parts: Vec<_> = vergen_git_describe.collect(); + let build_parts = build_parts.join("."); + + semver.push('+'); + semver.push_str(&build_parts); + + semver.parse().ok() } -/// The Zebra current release version. -pub fn release_version() -> String { - app_version() - .to_string() - .split('+') - .next() - .expect("always at least 1 slice") - .to_string() +/// The Zebra current release version, without any build metadata. +pub fn release_version() -> Version { + let mut release_version = build_version(); + + release_version.build = BuildMetadata::EMPTY; + + release_version } /// The User-Agent string provided by the node. @@ -117,21 +158,16 @@ pub fn user_agent() -> String { } /// Zebrad Application -#[derive(Debug)] +#[derive(Debug, Default)] pub struct ZebradApp { /// Application configuration. - config: Option, + config: CfgCell, /// Application state. state: application::State, } impl ZebradApp { - /// Are standard output and standard error both connected to ttys? - fn outputs_are_ttys() -> bool { - atty::is(atty::Stream::Stdout) && atty::is(atty::Stream::Stderr) - } - /// Returns the git commit for this build, if available. /// /// @@ -147,21 +183,6 @@ impl ZebradApp { } } -/// Initialize a new application instance. -/// -/// By default no configuration is loaded, and the framework state is -/// initialized to a default, empty state (no components, threads, etc). -#[allow(unknown_lints)] -#[allow(clippy::derivable_impls)] -impl Default for ZebradApp { - fn default() -> Self { - Self { - config: None, - state: application::State::default(), - } - } -} - impl Application for ZebradApp { /// Entrypoint command for this application. type Cmd = EntryPoint; @@ -173,8 +194,8 @@ impl Application for ZebradApp { type Paths = StandardPaths; /// Accessor for application configuration. - fn config(&self) -> &ZebradConfig { - self.config.as_ref().expect("config not loaded") + fn config(&self) -> Arc { + self.config.read() } /// Borrow the application state immutably. @@ -182,34 +203,21 @@ impl Application for ZebradApp { &self.state } - /// Borrow the application state mutably. - fn state_mut(&mut self) -> &mut application::State { - &mut self.state - } - /// Returns the framework components used by this application. fn framework_components( &mut self, - command: &Self::Cmd, + _command: &Self::Cmd, ) -> Result>>, FrameworkError> { - // Automatically use color if we're outputting to a terminal + // TODO: Open a PR in abscissa to add a TerminalBuilder for opting out + // of the `color_eyre::install` part of `Terminal::new` without + // ColorChoice::Never? + + // The Tracing component uses stdout directly and will apply colors automatically. // - // The `abcissa` docs claim that abscissa implements `Auto`, but it - // does not - except in `color_backtrace` backtraces. - let mut term_colors = self.term_colors(command); - if term_colors == ColorChoice::Auto { - // We want to disable colors on a per-stream basis, but that feature - // can only be implemented inside the terminal component streams. - // Instead, if either output stream is not a terminal, disable - // colors. 
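
The conversion implemented by `vergen_build_version` above is easiest to see with concrete inputs: the `-<commit count>-g<hash>` suffix from `git describe` becomes `+<commit count>.g<hash>` SemVer build metadata, and a leading `v` is stripped. A simplified worked example (it skips the real function's version-core validation and the `dirty` to `modified` rewrite):

```rust
fn describe_to_semver(describe: &str) -> Option<String> {
    let describe = describe.strip_prefix('v').unwrap_or(describe);
    let mut parts = describe.split('-').peekable();

    // The version core, e.g. "1.0.0".
    let mut semver = parts.next()?.to_string();

    // An optional pre-release tag, e.g. "rc.9", starts with a letter.
    if parts.peek().is_some_and(|part| part.starts_with(char::is_alphabetic)) {
        semver.push('-');
        semver.push_str(parts.next()?);
    }

    // Any remaining parts are build metadata: `+` then `.`-separated tags.
    let build: Vec<&str> = parts.collect();
    if !build.is_empty() {
        // A "git describe" commit count starts with a digit; anything else
        // means the input wasn't in the expected format.
        if !build[0].starts_with(char::is_numeric) {
            return None;
        }
        semver.push('+');
        semver.push_str(&build.join("."));
    }

    Some(semver)
}

fn main() {
    assert_eq!(
        describe_to_semver("v1.0.0-rc.9-6-g319b01bb84").as_deref(),
        Some("1.0.0-rc.9+6.g319b01bb84"),
    );
    assert_eq!(
        describe_to_semver("v1.0.0-6-g319b01bb84").as_deref(),
        Some("1.0.0+6.g319b01bb84"),
    );
}
```
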
- // - // We'd also like to check `config.tracing.use_color` here, but the - // config has not been loaded yet. - if !Self::outputs_are_ttys() { - term_colors = ColorChoice::Never; - } - } - let terminal = Terminal::new(term_colors); + // Note: It's important to use `ColorChoice::Never` here to avoid panicking in + // `register_components()` below if `color_eyre::install()` is called + // after `color_spantrace` has been initialized. + let terminal = Terminal::new(ColorChoice::Never); Ok(vec![Box::new(terminal)]) } @@ -229,10 +237,12 @@ impl Application for ZebradApp { let mut components = self.framework_components(command)?; // Load config *after* framework components so that we can - // report an error to the terminal if it occurs. + // report an error to the terminal if it occurs (unless used with a command that doesn't need the config). let config = match command.config_path() { Some(path) => match self.load_config(&path) { Ok(config) => config, + // Ignore errors loading the config for some commands. + Err(_e) if command.cmd().should_ignore_load_config_error() => Default::default(), Err(e) => { status_err!("Zebra could not parse the provided config file. This might mean you are using a deprecated format of the file. You can generate a valid config by running \"zebrad generate\", and diff it against yours to examine any format inconsistencies."); return Err(e); @@ -243,7 +253,7 @@ impl Application for ZebradApp { let config = command.process_config(config)?; - let theme = if Self::outputs_are_ttys() && config.tracing.use_color { + let theme = if config.tracing.use_color_stdout_and_stderr() { color_eyre::config::Theme::dark() } else { color_eyre::config::Theme::new() @@ -252,13 +262,32 @@ impl Application for ZebradApp { // collect the common metadata for the issue URL and panic report, // skipping any env vars that aren't present + // reads state disk version file, doesn't open RocksDB database + let disk_db_version = + match database_format_version_on_disk(&config.state, config.network.network) { + Ok(Some(version)) => version.to_string(), + // This "version" is specially formatted to match a relaxed version regex in CI + Ok(None) => "creating.new.database".to_string(), + Err(error) => { + let mut error = format!("error: {error:?}"); + error.truncate(100); + error + } + }; + let app_metadata = vec![ - // cargo or git tag + short commit - ("version", app_version().to_string()), + // build-time constant: cargo or git tag + short commit + ("version", build_version().to_string()), // config ("Zcash network", config.network.network.to_string()), - // constants - ("state version", DATABASE_FORMAT_VERSION.to_string()), + // code constant + ( + "running state version", + database_format_version_in_code().to_string(), + ), + // state disk file, doesn't open database + ("initial disk state version", disk_db_version), + // build-time constant ("features", env!("VERGEN_CARGO_FEATURES").to_string()), ]; @@ -362,7 +391,7 @@ impl Application for ZebradApp { #[cfg(feature = "sentry")] let guard = sentry::init(sentry::ClientOptions { debug: true, - release: Some(app_version().to_string().into()), + release: Some(build_version().to_string().into()), ..Default::default() }); @@ -394,23 +423,9 @@ impl Application for ZebradApp { .build_global() .expect("unable to initialize rayon thread pool"); - self.config = Some(config); - - let cfg_ref = self - .config - .as_ref() - .expect("config is loaded before register_components"); - - let default_filter = command - .command - .as_ref() - .map(|zcmd| 
zcmd.default_tracing_filter(command.verbose, command.help)) - .unwrap_or("warn"); - let is_server = command - .command - .as_ref() - .map(ZebradCmd::is_server) - .unwrap_or(false); + let cfg_ref = &config; + let default_filter = command.cmd().default_tracing_filter(command.verbose); + let is_server = command.cmd().is_server(); // Ignore the configured tracing filter for short-lived utility commands let mut tracing_config = cfg_ref.tracing.clone(); @@ -425,7 +440,11 @@ impl Application for ZebradApp { tracing_config.filter = Some(default_filter.to_owned()); tracing_config.flamegraph = None; } - components.push(Box::new(Tracing::new(tracing_config)?)); + components.push(Box::new(Tracing::new( + config.network.network, + tracing_config, + command.cmd().uses_intro(), + )?)); // Log git metadata and platform info when zebrad starts up if is_server { @@ -436,7 +455,7 @@ impl Application for ZebradApp { // Activate the global span, so it's visible when we load the other // components. Space is at a premium here, so we use an empty message, // short commit hash, and the unique part of the network name. - let net = &self.config.clone().unwrap().network.network.to_string()[..4]; + let net = &config.network.network.to_string()[..4]; let global_span = if let Some(git_commit) = ZebradApp::git_commit() { error_span!("", zebrad = git_commit, net) } else { @@ -459,7 +478,10 @@ impl Application for ZebradApp { components.push(Box::new(MetricsEndpoint::new(&metrics_config)?)); } - self.state.components.register(components) + self.state.components_mut().register(components)?; + + // Fire callback to signal state in the application lifecycle + self.after_config(config) } /// Load this application's configuration and initialize its components. @@ -468,16 +490,7 @@ impl Application for ZebradApp { // Create and register components with the application. // We do this first to calculate a proper dependency ordering before // application configuration is processed - self.register_components(command)?; - - // Fire callback to signal state in the application lifecycle - let config = self - .config - .take() - .expect("register_components always populates the config"); - self.after_config(config)?; - - Ok(()) + self.register_components(command) } /// Post-configuration lifecycle callback. @@ -487,13 +500,13 @@ impl Application for ZebradApp { /// possible. fn after_config(&mut self, config: Self::Cfg) -> Result<(), FrameworkError> { // Configure components - self.state.components.after_config(&config)?; - self.config = Some(config); + self.state.components_mut().after_config(&config)?; + self.config.set_once(config); Ok(()) } - fn shutdown(&mut self, shutdown: Shutdown) -> ! { + fn shutdown(&self, shutdown: Shutdown) -> ! { // Some OSes require a flush to send all output to the terminal. // zebrad's logging uses Abscissa, so we flush its streams. 
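
The `CfgCell` migration in this file replaces the old "load, stash in an `Option`, take it back out" config handling with a write-once cell that hands out shared handles. The exact `CfgCell` semantics aren't shown in this diff, but the `set_once`/`read` calls above suggest a contract like std's `OnceLock`; a rough sketch under that assumption:

```rust
use std::sync::{Arc, OnceLock};

#[derive(Debug, Default)]
struct ZebradConfig; // stand-in for the real config type

static CONFIG: OnceLock<Arc<ZebradConfig>> = OnceLock::new();

/// Like `after_config` above: store the parsed config exactly once.
fn after_config(config: ZebradConfig) {
    CONFIG
        .set(Arc::new(config))
        .expect("config should only be set once");
}

/// Like the new `config()` accessor: cheaply clone a shared handle.
fn config() -> Arc<ZebradConfig> {
    CONFIG.get().expect("config should be loaded first").clone()
}

fn main() {
    after_config(ZebradConfig);
    let _cfg = config();
}
```
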
// @@ -503,25 +516,33 @@ impl Application for ZebradApp { let _ = stdout().lock().flush(); let _ = stderr().lock().flush(); - if let Err(e) = self.state().components.shutdown(self, shutdown) { - let app_name = self.name().to_string(); + let shutdown_result = self.state().components().shutdown(self, shutdown); + + self.state() + .components_mut() + .get_downcast_mut::() + .map(Tracing::shutdown); - // Swap out a fake app so we can trigger the destructor on the original - let _ = std::mem::take(self); + if let Err(e) = shutdown_result { + let app_name = self.name().to_string(); fatal_error(app_name, &e); } - // Swap out a fake app so we can trigger the destructor on the original - let _ = std::mem::take(self); - match shutdown { Shutdown::Graceful => process::exit(0), Shutdown::Forced => process::exit(1), Shutdown::Crash => process::exit(2), } } +} - fn version(&self) -> Version { - app_version() - } +/// Boot the given application, parsing subcommand and options from +/// command-line arguments, and terminating when complete. +// +pub fn boot(app_cell: &'static AppCell) -> ! { + let args = + EntryPoint::process_cli_args(env::args_os().collect()).unwrap_or_else(|err| err.exit()); + + ZebradApp::run(app_cell, args); + process::exit(0); } diff --git a/zebrad/src/application/entry_point.rs b/zebrad/src/application/entry_point.rs deleted file mode 100644 index 16f262d1394..00000000000 --- a/zebrad/src/application/entry_point.rs +++ /dev/null @@ -1,111 +0,0 @@ -//! Zebrad EntryPoint - -use crate::{ - commands::{StartCmd, ZebradCmd}, - config::ZebradConfig, -}; - -use std::path::PathBuf; - -use abscissa_core::{ - command::{Command, Usage}, - config::Configurable, - FrameworkError, Options, Runnable, -}; - -// (See https://docs.rs/abscissa_core/0.5.2/src/abscissa_core/command/entrypoint.rs.html) -/// Toplevel entrypoint command. -/// -/// Handles obtaining toplevel help as well as verbosity settings. -#[derive(Debug, Options)] -pub struct EntryPoint { - /// Path to the configuration file - #[options(short = "c", help = "path to configuration file")] - pub config: Option, - - /// Obtain help about the current command - #[options(short = "h", help = "print help message")] - pub help: bool, - - /// Increase verbosity setting - #[options(short = "v", help = "be verbose")] - pub verbose: bool, - - /// Subcommand to execute. - /// - /// The `command` option will delegate option parsing to the command type, - /// starting at the first free argument. Defaults to start. 
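
Both this deleted gumdrop entry point (via `default_expr`) and the new clap-based one treat `start` as the default subcommand. `process_cli_args`, called from `boot` above, isn't included in this diff, so the following is only an illustration of how that pre-parse defaulting can work, with a hypothetical helper and subcommand list:

```rust
use std::ffi::OsString;

/// Hypothetical helper: if no known subcommand appears on the command line,
/// insert "start" so parsing falls through to the default command.
fn insert_default_subcommand(mut args: Vec<OsString>, known: &[&str]) -> Vec<OsString> {
    // args[0] is the binary name, so only inspect the arguments after it.
    let has_subcommand = args.iter().skip(1).any(|arg| known.iter().any(|k| arg == k));

    if !has_subcommand {
        args.insert(1, "start".into());
    }
    args
}

fn main() {
    let known = ["copy-state", "download", "generate", "start", "tip-height"];
    let args = vec![OsString::from("zebrad"), OsString::from("-v")];

    let args = insert_default_subcommand(args, &known);
    assert_eq!(args[1], OsString::from("start"));
}
```
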
- #[options(command, default_expr = "Some(ZebradCmd::Start(StartCmd::default()))")] - pub command: Option, -} - -impl EntryPoint { - /// Borrow the underlying command type - fn command(&self) -> &ZebradCmd { - if self.help { - let _ = Usage::for_command::().print_info(); - let _ = Usage::for_command::().print_usage(); - let _ = Usage::for_command::().print_usage(); - std::process::exit(0); - } - - self.command - .as_ref() - .expect("Some(ZebradCmd::Start(StartCmd::default()) as default value") - } -} - -impl Runnable for EntryPoint { - fn run(&self) { - self.command().run() - } -} - -impl Command for EntryPoint { - /// Name of this program as a string - fn name() -> &'static str { - ZebradCmd::name() - } - - /// Description of this program - fn description() -> &'static str { - ZebradCmd::description() - } - - /// Version of this program - fn version() -> &'static str { - ZebradCmd::version() - } - - /// Authors of this program - fn authors() -> &'static str { - ZebradCmd::authors() - } - - /// Get usage information for a particular subcommand (if available) - fn subcommand_usage(command: &str) -> Option { - ZebradCmd::subcommand_usage(command) - } -} - -impl Configurable for EntryPoint { - /// Path to the command's configuration file - fn config_path(&self) -> Option { - match &self.config { - // Use explicit `-c`/`--config` argument if passed - Some(cfg) => Some(cfg.clone()), - - // Otherwise defer to the toplevel command's config path logic - None => self.command.as_ref().and_then(|cmd| cmd.config_path()), - } - } - - /// Process the configuration after it has been loaded, potentially - /// modifying it or returning an error if options are incompatible - fn process_config(&self, config: ZebradConfig) -> Result { - match &self.command { - Some(cmd) => cmd.process_config(config), - None => Ok(config), - } - } -} diff --git a/zebrad/src/bin/zebrad/main.rs b/zebrad/src/bin/zebrad/main.rs index 962be2407a6..914fcc256b0 100644 --- a/zebrad/src/bin/zebrad/main.rs +++ b/zebrad/src/bin/zebrad/main.rs @@ -1,8 +1,8 @@ //! 
Main entry point for Zebrad -use zebrad::application::APPLICATION; +use zebrad::application::{boot, APPLICATION}; /// Process entry point for `zebrad` fn main() { - abscissa_core::boot(&APPLICATION); + boot(&APPLICATION); } diff --git a/zebrad/src/commands.rs b/zebrad/src/commands.rs index a306a5ab840..d7f4fa337be 100644 --- a/zebrad/src/commands.rs +++ b/zebrad/src/commands.rs @@ -2,62 +2,49 @@ mod copy_state; mod download; +mod entry_point; mod generate; mod start; mod tip_height; -mod version; + +#[cfg(test)] +mod tests; use self::ZebradCmd::*; use self::{ copy_state::CopyStateCmd, download::DownloadCmd, generate::GenerateCmd, - tip_height::TipHeightCmd, version::VersionCmd, + tip_height::TipHeightCmd, }; -pub use self::start::StartCmd; +pub use self::{entry_point::EntryPoint, start::StartCmd}; use crate::config::ZebradConfig; -use abscissa_core::{ - config::Override, Command, Configurable, FrameworkError, Help, Options, Runnable, -}; +use abscissa_core::{config::Override, Command, Configurable, FrameworkError, Runnable}; use std::path::PathBuf; /// Zebrad Configuration Filename pub const CONFIG_FILE: &str = "zebrad.toml"; /// Zebrad Subcommands -#[derive(Command, Debug, Options)] +#[derive(Command, Debug, clap::Subcommand)] pub enum ZebradCmd { - /// The `copy-state` subcommand, used to debug cached chain state + /// The `copy-state` subcommand, used to debug cached chain state (expert users only) // TODO: hide this command from users in release builds (#3279) - #[options(help = "copy cached chain state (debug only)")] CopyState(CopyStateCmd), - /// The `download` subcommand - #[options(help = "pre-download required parameter files")] + // The `download` subcommand + /// Pre-download required Zcash Sprout and Sapling parameter files Download(DownloadCmd), - /// The `generate` subcommand - #[options(help = "generate a skeleton configuration")] + /// Generate a default `zebrad.toml` configuration Generate(GenerateCmd), - /// The `help` subcommand - #[options(help = "get usage information, \ - use help for subcommand usage information, \ - or --help flag to see top-level options")] - Help(Help), - - /// The `start` subcommand - #[options(help = "start the application")] + /// Start the application (default command) Start(StartCmd), - /// The `tip-height` subcommand - #[options(help = "get the block height of Zebra's persisted chain state")] + /// Print the tip block height of Zebra's chain state on disk TipHeight(TipHeightCmd), - - /// The `version` subcommand - #[options(help = "display version information")] - Version(VersionCmd), } impl ZebradCmd { @@ -73,27 +60,46 @@ impl ZebradCmd { CopyState(_) | Start(_) => true, // Utility commands that don't use server components - Download(_) | Generate(_) | Help(_) | TipHeight(_) | Version(_) => false, + Download(_) | Generate(_) | TipHeight(_) => false, + } + } + + /// Returns true if this command shows the Zebra intro logo and text. + /// + /// For example, `Start` acts as a Zcash node. + pub(crate) fn uses_intro(&self) -> bool { + // List all the commands, so new commands have to make a choice here + match self { + // Commands that need an intro + Start(_) => true, + + // Utility commands + CopyState(_) | Download(_) | Generate(_) | TipHeight(_) => false, } } + /// Returns true if this command should ignore errors when + /// attempting to load a config file. 
+ pub(crate) fn should_ignore_load_config_error(&self) -> bool { + matches!(self, ZebradCmd::Generate(_) | ZebradCmd::Download(_)) + } + /// Returns the default log level for this command, based on the `verbose` command line flag. /// /// Some commands need to be quiet by default. - pub(crate) fn default_tracing_filter(&self, verbose: bool, help: bool) -> &'static str { + pub(crate) fn default_tracing_filter(&self, verbose: bool) -> &'static str { let only_show_warnings = match self { // Commands that generate quiet output by default. // This output: // - is used by automated tools, or // - needs to be read easily. - Generate(_) | TipHeight(_) | Help(_) | Version(_) => true, + Generate(_) | TipHeight(_) => true, // Commands that generate informative logging output by default. CopyState(_) | Download(_) | Start(_) => false, }; - // set to warn so that usage info is printed without info-level logs from component registration - if help || (only_show_warnings && !verbose) { + if only_show_warnings && !verbose { "warn" } else if only_show_warnings || !verbose { "info" @@ -109,10 +115,8 @@ impl Runnable for ZebradCmd { CopyState(cmd) => cmd.run(), Download(cmd) => cmd.run(), Generate(cmd) => cmd.run(), - ZebradCmd::Help(cmd) => cmd.run(), Start(cmd) => cmd.run(), TipHeight(cmd) => cmd.run(), - Version(cmd) => cmd.run(), } } } diff --git a/zebrad/src/commands/copy_state.rs b/zebrad/src/commands/copy_state.rs index 2466ed7b2e4..ffe9575ddec 100644 --- a/zebrad/src/commands/copy_state.rs +++ b/zebrad/src/commands/copy_state.rs @@ -35,7 +35,7 @@ use std::{cmp::min, path::PathBuf}; -use abscissa_core::{config, Command, FrameworkError, Options, Runnable}; +use abscissa_core::{config, Command, FrameworkError, Runnable}; use color_eyre::eyre::{eyre, Report}; use tokio::time::Instant; use tower::{Service, ServiceExt}; @@ -45,6 +45,7 @@ use zebra_state as old_zs; use zebra_state as new_zs; use crate::{ + application::ZebradApp, components::tokio::{RuntimeRun, TokioComponent}, config::ZebradConfig, prelude::*, @@ -54,11 +55,11 @@ use crate::{ /// How often we log info-level progress messages const PROGRESS_HEIGHT_INTERVAL: u32 = 5_000; -/// `copy-state` subcommand -#[derive(Command, Debug, Options)] +/// copy cached chain state (expert users only) +#[derive(Command, Debug, clap::Parser)] pub struct CopyStateCmd { /// Source height that the copy finishes at. - #[options(help = "stop copying at this source height")] + #[clap(long, short, help = "stop copying at this source height")] max_source_height: Option, /// Path to a Zebra config.toml for the target state. @@ -66,26 +67,30 @@ pub struct CopyStateCmd { /// /// Zebra only uses the state options from this config. /// All other options are ignored. 
- #[options(help = "config file path for the target state (default: ephemeral), \ - the source state uses the main zebrad config")] + #[clap( + long, + short, + help = "config file path for the target state (default: ephemeral), \ + the source state uses the main zebrad config" + )] target_config_path: Option, /// Filter strings which override the config file and defaults - #[options(free, help = "tracing filters which override the zebrad.toml config")] + #[clap(help = "tracing filters which override the zebrad.toml config")] filters: Vec, } impl CopyStateCmd { /// Configure and launch the copy command async fn start(&self) -> Result<(), Report> { - let base_config = app_config().clone(); + let base_config = APPLICATION.config(); let source_config = base_config.state.clone(); // The default load_config impl doesn't actually modify the app config. let target_config = self .target_config_path .as_ref() - .map(|path| app_writer().load_config(path)) + .map(|path| ZebradApp::default().load_config(path)) .transpose()? .map(|app_config| app_config.state) .unwrap_or_else(new_zs::Config::ephemeral); @@ -230,7 +235,7 @@ impl CopyStateCmd { let target_block_commit_hash = target_state .ready() .await? - .call(new_zs::Request::CommitFinalizedBlock( + .call(new_zs::Request::CommitCheckpointVerifiedBlock( source_block.clone().into(), )) .await?; @@ -240,7 +245,7 @@ impl CopyStateCmd { target_block_commit_hash } response => Err(format!( - "unexpected response to CommitFinalizedBlock request, height: {height}\n \ + "unexpected response to CommitCheckpointVerifiedBlock request, height: {height}\n \ response: {response:?}", ))?, }; @@ -394,9 +399,9 @@ impl Runnable for CopyStateCmd { target_config_path = ?self.target_config_path, "starting cached chain state copy" ); - let rt = app_writer() - .state_mut() - .components + let rt = APPLICATION + .state() + .components_mut() .get_downcast_mut::() .expect("TokioComponent should be available") .rt diff --git a/zebrad/src/commands/download.rs b/zebrad/src/commands/download.rs index 83881c07140..4feefcb9e58 100644 --- a/zebrad/src/commands/download.rs +++ b/zebrad/src/commands/download.rs @@ -5,10 +5,10 @@ //! This command should be used if you're launching lots of `zebrad start` instances for testing, //! or you want to include the parameter files in a distribution package. -use abscissa_core::{Command, Options, Runnable}; +use abscissa_core::{Command, Runnable}; -/// `download` subcommand -#[derive(Command, Debug, Default, Options)] +/// Pre-download required Zcash Sprout and Sapling parameter files +#[derive(Command, Debug, Default, clap::Parser)] pub struct DownloadCmd {} impl DownloadCmd { diff --git a/zebrad/src/commands/entry_point.rs b/zebrad/src/commands/entry_point.rs new file mode 100644 index 00000000000..3955dadb35a --- /dev/null +++ b/zebrad/src/commands/entry_point.rs @@ -0,0 +1,127 @@ +//! Zebrad EntryPoint + +use abscissa_core::{Command, Configurable, FrameworkError, Runnable}; +use clap::Parser; +use std::{ffi::OsString, path::PathBuf}; + +use crate::config::ZebradConfig; + +use super::ZebradCmd; + +/// Toplevel entrypoint command. +/// +/// Handles obtaining toplevel help as well as verbosity settings. +#[derive(Debug, clap::Parser)] +#[clap( + version = clap::crate_version!(), + author="Zcash Foundation ", + help_template = "\ +{name} {version}\n +{author}\n +{usage-heading} {usage}\n +{all-args}\ +" +)] +pub struct EntryPoint { + /// Subcommand to execute. 
+    ///
+    /// The `command` option will delegate option parsing to the command type,
+    /// starting at the first free argument. Defaults to start.
+    #[clap(subcommand)]
+    pub cmd: Option<ZebradCmd>,
+
+    /// Path to the configuration file
+    #[clap(long, short, help = "path to configuration file")]
+    pub config: Option<PathBuf>,
+
+    /// Increase verbosity setting
+    #[clap(long, short, help = "be verbose")]
+    pub verbose: bool,
+
+    /// Filter strings which override the config file and defaults
+    // This can be applied to the default start command if no subcommand is provided.
+    #[clap(long, help = "tracing filters which override the zebrad.toml config")]
+    filters: Vec<String>,
+}
+
+impl EntryPoint {
+    /// Borrow the command in the option
+    ///
+    /// # Panics
+    ///
+    /// If `cmd` is None
+    pub fn cmd(&self) -> &ZebradCmd {
+        self.cmd
+            .as_ref()
+            .expect("should default to start if not provided")
+    }
+
+    /// Returns a string that parses to the default subcommand
+    pub fn default_cmd_as_str() -> &'static str {
+        "start"
+    }
+
+    /// Returns true if the provided arguments don't include a subcommand,
+    /// so the default subcommand should be added.
+    fn should_add_default_subcommand(&self) -> bool {
+        self.cmd.is_none()
+    }
+
+    /// Process command arguments and insert the default subcommand
+    /// if no subcommand is provided.
+    pub fn process_cli_args(mut args: Vec<OsString>) -> clap::error::Result<Vec<OsString>> {
+        let entry_point = EntryPoint::try_parse_from(&args)?;
+
+        // Add the default subcommand to args after the top-level args if cmd is None
+        if entry_point.should_add_default_subcommand() {
+            args.push(EntryPoint::default_cmd_as_str().into());
+            // This duplicates the top-level filters args, but the tracing component only checks `StartCmd.filters`.
+            for filter in entry_point.filters {
+                args.push(filter.into())
+            }
+        }
+
+        Ok(args)
+    }
+}
+
+impl Runnable for EntryPoint {
+    fn run(&self) {
+        self.cmd().run()
+    }
+}
+
+impl Command for EntryPoint {
+    /// Name of this program as a string
+    fn name() -> &'static str {
+        ZebradCmd::name()
+    }
+
+    /// Description of this program
+    fn description() -> &'static str {
+        ZebradCmd::description()
+    }
+
+    /// Authors of this program
+    fn authors() -> &'static str {
+        ZebradCmd::authors()
+    }
+}
+
+impl Configurable<ZebradConfig> for EntryPoint {
+    /// Path to the command's configuration file
+    fn config_path(&self) -> Option<PathBuf> {
+        match &self.config {
+            // Use explicit `-c`/`--config` argument if passed
+            Some(cfg) => Some(cfg.clone()),
+
+            // Otherwise defer to the toplevel command's config path logic
+            None => self.cmd().config_path(),
+        }
+    }
+
+    /// Process the configuration after it has been loaded, potentially
+    /// modifying it or returning an error if options are incompatible
+    fn process_config(&self, config: ZebradConfig) -> Result<ZebradConfig, FrameworkError> {
+        self.cmd().process_config(config)
+    }
+}
diff --git a/zebrad/src/commands/generate.rs b/zebrad/src/commands/generate.rs
index 649e029a171..de9a3019c53 100644
--- a/zebrad/src/commands/generate.rs
+++ b/zebrad/src/commands/generate.rs
@@ -1,13 +1,20 @@
-//! `generate` subcommand - generates a skeleton config.
+//! `generate` subcommand - generates a default `zebrad.toml` config.
 
 use crate::config::ZebradConfig;
-use abscissa_core::{Command, Options, Runnable};
+use abscissa_core::{Command, Runnable};
+use clap::Parser;
 
-/// `generate` subcommand
-#[derive(Command, Debug, Options)]
+/// Generate a default `zebrad.toml` configuration
+#[derive(Command, Debug, Default, Parser)]
 pub struct GenerateCmd {
     /// The file to write the generated config to.
- #[options(help = "The file to write the generated config to (stdout if unspecified)")] + // + // TODO: use PathBuf here instead, to support non-UTF-8 paths + #[clap( + long, + short, + help = "The file to write the generated config to (stdout if unspecified)" + )] output_file: Option, } diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index ab57713a3cc..3e8e18230ec 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -2,7 +2,7 @@ //! //! ## Application Structure //! -//! A zebra node consists of the following services and tasks: +//! A zebra node consists of the following major services and tasks: //! //! Peers: //! * Peer Connection Pool Service @@ -12,6 +12,9 @@ //! * maintains a list of peer addresses, and connection priority metadata //! * discovers new peer addresses from existing peer connections //! * initiates new outbound peer connections in response to demand from tasks within this node +//! * Peer Cache Service +//! * Reads previous peer cache on startup, and adds it to the configured DNS seed peers +//! * Periodically updates the peer cache on disk from the latest address book state //! //! Blocks & Mempool Transactions: //! * Consensus Service @@ -68,20 +71,20 @@ //! //! Some of the diagnostic features are optional, and need to be enabled at compile-time. -use abscissa_core::{config, Command, FrameworkError, Options, Runnable}; +use abscissa_core::{config, Command, FrameworkError, Runnable}; use color_eyre::eyre::{eyre, Report}; use futures::FutureExt; use tokio::{pin, select, sync::oneshot}; use tower::{builder::ServiceBuilder, util::BoxService}; use tracing_futures::Instrument; -use zebra_consensus::chain::BackgroundTaskHandles; +use zebra_consensus::router::BackgroundTaskHandles; use zebra_rpc::server::RpcServer; use crate::{ - application::{app_version, user_agent}, + application::{build_version, user_agent}, components::{ - inbound::{self, InboundSetupData}, + inbound::{self, InboundSetupData, MAX_INBOUND_RESPONSE_TIME}, mempool::{self, Mempool}, sync::{self, show_block_chain_progress, VERIFICATION_PIPELINE_SCALING_MULTIPLIER}, tokio::{RuntimeRun, TokioComponent}, @@ -91,20 +94,20 @@ use crate::{ prelude::*, }; -/// `start` subcommand -#[derive(Command, Debug, Options, Default)] +/// Start the application (default command) +#[derive(Command, Debug, Default, clap::Parser)] pub struct StartCmd { /// Filter strings which override the config file and defaults - #[options(free, help = "tracing filters which override the zebrad.toml config")] + #[clap(help = "tracing filters which override the zebrad.toml config")] filters: Vec, } impl StartCmd { async fn start(&self) -> Result<(), Report> { - let config = app_config().clone(); + let config = APPLICATION.config(); info!("initializing node state"); - let (_, max_checkpoint_height) = zebra_consensus::chain::init_checkpoint_list( + let (_, max_checkpoint_height) = zebra_consensus::router::init_checkpoint_list( config.consensus.clone(), config.network.network, ); @@ -129,10 +132,18 @@ impl StartCmd { // The service that our node uses to respond to requests by peers. The // load_shed middleware ensures that we reduce the size of the peer set // in response to excess load. + // + // # Security + // + // This layer stack is security-sensitive, modifying it can cause hangs, + // or enable denial of service attacks. + // + // See `zebra_network::Connection::drive_peer_request()` for details. 
let (setup_tx, setup_rx) = oneshot::channel(); let inbound = ServiceBuilder::new() .load_shed() .buffer(inbound::downloads::MAX_INBOUND_CONCURRENCY) + .timeout(MAX_INBOUND_RESPONSE_TIME) .service(Inbound::new( config.sync.full_verify_concurrency_limit, setup_rx, @@ -147,8 +158,8 @@ impl StartCmd { .await; info!("initializing verifiers"); - let (chain_verifier, tx_verifier, consensus_task_handles, max_checkpoint_height) = - zebra_consensus::chain::init( + let (block_verifier_router, tx_verifier, consensus_task_handles, max_checkpoint_height) = + zebra_consensus::router::init( config.consensus.clone(), config.network.network, state.clone(), @@ -161,7 +172,7 @@ impl StartCmd { &config, max_checkpoint_height, peer_set.clone(), - chain_verifier.clone(), + block_verifier_router.clone(), state.clone(), latest_chain_tip.clone(), ); @@ -186,7 +197,7 @@ impl StartCmd { let setup_data = InboundSetupData { address_book: address_book.clone(), block_download_peer_set: peer_set.clone(), - block_verifier: chain_verifier.clone(), + block_verifier: block_verifier_router.clone(), mempool: mempool.clone(), state, latest_chain_tip: latest_chain_tip.clone(), @@ -199,15 +210,16 @@ impl StartCmd { // Launch RPC server let (rpc_task_handle, rpc_tx_queue_task_handle, rpc_server) = RpcServer::spawn( - config.rpc, + config.rpc.clone(), #[cfg(feature = "getblocktemplate-rpcs")] - config.mining, + config.mining.clone(), #[cfg(not(feature = "getblocktemplate-rpcs"))] (), - app_version(), + build_version(), + user_agent(), mempool.clone(), read_only_state_service, - chain_verifier, + block_verifier_router, sync_status.clone(), address_book, latest_chain_tip.clone(), @@ -425,7 +437,7 @@ impl StartCmd { /// Returns the bound for the state service buffer, /// based on the configurations of the services that use the state concurrently. fn state_buffer_bound() -> usize { - let config = app_config().clone(); + let config = APPLICATION.config(); // Ignore the checkpoint verify limit, because it is very large. // @@ -447,9 +459,9 @@ impl Runnable for StartCmd { /// Start the application. fn run(&self) { info!("Starting zebrad"); - let rt = app_writer() - .state_mut() - .components + let rt = APPLICATION + .state() + .components_mut() .get_downcast_mut::() .expect("TokioComponent should be available") .rt diff --git a/zebrad/src/commands/tests.rs b/zebrad/src/commands/tests.rs new file mode 100644 index 00000000000..edd019a9022 --- /dev/null +++ b/zebrad/src/commands/tests.rs @@ -0,0 +1,47 @@ +//! 
Tests for parsing zebrad commands
+
+use clap::Parser;
+
+use crate::commands::ZebradCmd;
+
+use super::EntryPoint;
+
+#[test]
+fn args_with_subcommand_pass_through() {
+    let test_cases = [
+        (false, true, false, vec!["zebrad"]),
+        (false, true, true, vec!["zebrad", "-v"]),
+        (false, true, true, vec!["zebrad", "--verbose"]),
+        (true, false, false, vec!["zebrad", "-h"]),
+        (true, false, false, vec!["zebrad", "--help"]),
+        (false, true, false, vec!["zebrad", "start"]),
+        (false, true, true, vec!["zebrad", "-v", "start"]),
+        (false, true, false, vec!["zebrad", "--filters", "warn"]),
+        (true, false, false, vec!["zebrad", "warn"]),
+        (false, true, false, vec!["zebrad", "start", "warn"]),
+        (true, false, false, vec!["zebrad", "help", "warn"]),
+    ];
+
+    for (should_exit, should_be_start, should_be_verbose, args) in test_cases {
+        let args = EntryPoint::process_cli_args(args.iter().map(Into::into).collect());
+
+        if should_exit {
+            args.expect_err("parsing invalid args or 'help'/'--help' should return an error");
+            continue;
+        }
+
+        let args: Vec<_> = args.expect("args should parse into EntryPoint");
+
+        let args =
+            EntryPoint::try_parse_from(args).expect("hardcoded args should parse successfully");
+
+        assert!(args.config.is_none(), "args.config should be none");
+        assert!(args.cmd.is_some(), "args.cmd should not be none");
+        assert_eq!(
+            args.verbose, should_be_verbose,
+            "process_cli_args should preserve top-level args"
+        );
+
+        assert_eq!(matches!(args.cmd(), ZebradCmd::Start(_)), should_be_start,);
+    }
+}
diff --git a/zebrad/src/commands/tip_height.rs b/zebrad/src/commands/tip_height.rs
index bc045175d4c..8ace683d5ac 100644
--- a/zebrad/src/commands/tip_height.rs
+++ b/zebrad/src/commands/tip_height.rs
@@ -5,7 +5,8 @@
 
 use std::path::PathBuf;
 
-use abscissa_core::{Command, Options, Runnable};
+use abscissa_core::{Application, Command, Runnable};
+use clap::Parser;
 use color_eyre::eyre::{eyre, Result};
 
 use zebra_chain::{
@@ -15,17 +16,17 @@ use zebra_chain::{
 };
 use zebra_state::LatestChainTip;
 
-use crate::prelude::app_config;
+use crate::prelude::APPLICATION;
 
-/// `zebra-tip-height` subcommand
-#[derive(Command, Debug, Options)]
+/// Print the tip block height of Zebra's chain state on disk
+#[derive(Command, Debug, Default, Parser)]
 pub struct TipHeightCmd {
     /// Path to Zebra's cached state.
-    #[options(help = "path to directory with the Zebra chain state")]
+    #[clap(long, short, help = "path to directory with the Zebra chain state")]
     cache_dir: Option<PathBuf>,
 
     /// The network to obtain the chain tip.
-    #[options(default = "mainnet", help = "the network of the chain to load")]
+    #[clap(long, short, help = "the network of the chain to load")]
     network: Network,
 }
 
@@ -54,7 +55,7 @@ impl TipHeightCmd {
 
     /// Starts a state service using the `cache_dir` and `network` from the provided arguments.
     fn load_latest_chain_tip(&self) -> LatestChainTip {
-        let mut config = app_config().state.clone();
+        let mut config = APPLICATION.config().state.clone();
 
         if let Some(cache_dir) = self.cache_dir.clone() {
             config.cache_dir = cache_dir;
diff --git a/zebrad/src/commands/version.rs b/zebrad/src/commands/version.rs
deleted file mode 100644
index 047b9b12a75..00000000000
--- a/zebrad/src/commands/version.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-//!
`version` subcommand - -#![allow(clippy::never_loop)] - -use super::ZebradCmd; -use abscissa_core::{Command, Options, Runnable}; - -/// `version` subcommand -#[derive(Command, Debug, Default, Options)] -pub struct VersionCmd {} - -impl Runnable for VersionCmd { - /// Print version message - #[allow(clippy::print_stdout)] - fn run(&self) { - println!("{} {}", ZebradCmd::name(), ZebradCmd::version()); - } -} diff --git a/zebrad/src/components/inbound.rs b/zebrad/src/components/inbound.rs index f3029ec5d2e..e93aa8517f0 100644 --- a/zebrad/src/components/inbound.rs +++ b/zebrad/src/components/inbound.rs @@ -11,6 +11,7 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, + time::Duration, }; use chrono::Utc; @@ -18,6 +19,7 @@ use futures::{ future::{FutureExt, TryFutureExt}, stream::Stream, }; +use num_integer::div_ceil; use tokio::sync::oneshot::{self, error::TryRecvError}; use tower::{buffer::Buffer, timeout::Timeout, util::BoxService, Service, ServiceExt}; @@ -29,7 +31,7 @@ use zebra_chain::{ serialization::ZcashSerialize, transaction::UnminedTxId, }; -use zebra_consensus::chain::VerifyChainError; +use zebra_consensus::router::RouterError; use zebra_network::{ constants::{ADDR_RESPONSE_LIMIT_DENOMINATOR, MAX_ADDRS_IN_MESSAGE}, AddressBook, InventoryResponse, @@ -50,6 +52,12 @@ mod tests; use downloads::Downloads as BlockDownloads; +/// The maximum amount of time an inbound service response can take. +/// +/// If the response takes longer than this time, it will be cancelled, +/// and the peer might be disconnected. +pub const MAX_INBOUND_RESPONSE_TIME: Duration = Duration::from_secs(5); + /// The number of bytes the [`Inbound`] service will queue in response to a single block or /// transaction request, before ignoring any additional block or transaction IDs in that request. /// @@ -73,12 +81,12 @@ type BlockDownloadPeerSet = Buffer, zn::Request>; type State = Buffer, zs::Request>; type Mempool = Buffer, mempool::Request>; -type BlockVerifier = Buffer< - BoxService, +type SemanticBlockVerifier = Buffer< + BoxService, zebra_consensus::Request, >; type GossipedBlockDownloads = - BlockDownloads, Timeout, State>; + BlockDownloads, Timeout, State>; /// The services used by the [`Inbound`] service. pub struct InboundSetupData { @@ -91,7 +99,7 @@ pub struct InboundSetupData { /// A service that verifies downloaded blocks. /// /// Given to `Inbound.block_downloads` after the required services are set up. - pub block_verifier: BlockVerifier, + pub block_verifier: SemanticBlockVerifier, /// A service that manages transactions in the memory pool. pub mempool: Mempool, @@ -374,10 +382,7 @@ impl Service for Inbound { let mut peers = peers.sanitized(now); // Truncate the list - // - // TODO: replace with div_ceil once it stabilises - // https://github.com/rust-lang/rust/issues/88581 - let address_limit = (peers.len() + ADDR_RESPONSE_LIMIT_DENOMINATOR - 1) / ADDR_RESPONSE_LIMIT_DENOMINATOR; + let address_limit = div_ceil(peers.len(), ADDR_RESPONSE_LIMIT_DENOMINATOR); let address_limit = MAX_ADDRS_IN_MESSAGE.min(address_limit); peers.truncate(address_limit); diff --git a/zebrad/src/components/inbound/downloads.rs b/zebrad/src/components/inbound/downloads.rs index aa8b2cf6c25..11200e66435 100644 --- a/zebrad/src/components/inbound/downloads.rs +++ b/zebrad/src/components/inbound/downloads.rs @@ -49,7 +49,7 @@ type BoxError = Box; /// Since Zebra keeps an `inv` index, inbound downloads for malicious blocks /// will be directed to the malicious node that originally gossiped the hash. 
/// Therefore, this attack can be carried out by a single malicious node. -pub const MAX_INBOUND_CONCURRENCY: usize = 20; +pub const MAX_INBOUND_CONCURRENCY: usize = 30; /// The action taken in response to a peer's gossiped block hash. pub enum DownloadAction { diff --git a/zebrad/src/components/inbound/tests/fake_peer_set.rs b/zebrad/src/components/inbound/tests/fake_peer_set.rs index 55816fd4eb9..1a383bb90b3 100644 --- a/zebrad/src/components/inbound/tests/fake_peer_set.rs +++ b/zebrad/src/components/inbound/tests/fake_peer_set.rs @@ -23,7 +23,9 @@ use zebra_chain::{ transaction::{UnminedTx, UnminedTxId, VerifiedUnminedTx}, }; use zebra_consensus::{error::TransactionError, transaction, Config as ConsensusConfig}; -use zebra_network::{AddressBook, InventoryResponse, Request, Response}; +use zebra_network::{ + constants::DEFAULT_MAX_CONNS_PER_IP, AddressBook, InventoryResponse, Request, Response, +}; use zebra_node_services::mempool; use zebra_state::{ChainTipChange, Config as StateConfig, CHAIN_TIP_UPDATE_WAIT_LIMIT}; use zebra_test::mock_service::{MockService, PanicAssertion}; @@ -35,7 +37,7 @@ use crate::{ gossip_mempool_transaction_id, unmined_transactions_in_blocks, Config as MempoolConfig, Mempool, MempoolError, SameEffectsChainRejectionError, UnboxMempoolError, }, - sync::{self, BlockGossipError, SyncStatus, TIPS_RESPONSE_TIMEOUT}, + sync::{self, BlockGossipError, SyncStatus, PEER_GOSSIP_DELAY}, }, BoxError, }; @@ -117,13 +119,13 @@ async fn mempool_requests_for_transactions() { let sync_gossip_result = sync_gossip_task_handle.now_or_never(); assert!( - matches!(sync_gossip_result, None), + sync_gossip_result.is_none(), "unexpected error or panic in sync gossip task: {sync_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); } @@ -208,13 +210,13 @@ async fn mempool_push_transaction() -> Result<(), crate::BoxError> { let sync_gossip_result = sync_gossip_task_handle.now_or_never(); assert!( - matches!(sync_gossip_result, None), + sync_gossip_result.is_none(), "unexpected error or panic in sync gossip task: {sync_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -313,13 +315,13 @@ async fn mempool_advertise_transaction_ids() -> Result<(), crate::BoxError> { let sync_gossip_result = sync_gossip_task_handle.now_or_never(); assert!( - matches!(sync_gossip_result, None), + sync_gossip_result.is_none(), "unexpected error or panic in sync gossip task: {sync_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -410,7 +412,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { .unwrap(); state_service .clone() - .oneshot(zebra_state::Request::CommitFinalizedBlock( + .oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock( block_two.clone().into(), )) .await @@ -421,7 +423,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { hs.insert(tx1_id); // Transaction and Block IDs are gossipped, in any order, after waiting for the gossip delay - 
tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; let possible_requests = &mut [ Request::AdvertiseTransactionIds(hs), Request::AdvertiseBlock(block_two.hash()), @@ -483,14 +485,14 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { .unwrap(); state_service .clone() - .oneshot(zebra_state::Request::CommitFinalizedBlock( + .oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock( block_three.clone().into(), )) .await .unwrap(); // Test the block is gossiped, after waiting for the multi-gossip delay - tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; peer_set .expect_request(Request::AdvertiseBlock(block_three.hash())) .await @@ -567,7 +569,7 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { ); // Test transaction 2 is gossiped, after waiting for the multi-gossip delay - tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; let mut hs = HashSet::new(); hs.insert(tx2_id); @@ -591,14 +593,14 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { for block in more_blocks { state_service .clone() - .oneshot(zebra_state::Request::CommitFinalizedBlock( + .oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock( block.clone().into(), )) .await .unwrap(); // Test the block is gossiped, after waiting for the multi-gossip delay - tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; peer_set .expect_request(Request::AdvertiseBlock(block.hash())) .await @@ -629,13 +631,13 @@ async fn mempool_transaction_expiration() -> Result<(), crate::BoxError> { let sync_gossip_result = sync_gossip_task_handle.now_or_never(); assert!( - matches!(sync_gossip_result, None), + sync_gossip_result.is_none(), "unexpected error or panic in sync gossip task: {sync_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -727,13 +729,13 @@ async fn inbound_block_height_lookahead_limit() -> Result<(), crate::BoxError> { let sync_gossip_result = sync_gossip_task_handle.now_or_never(); assert!( - matches!(sync_gossip_result, None), + sync_gossip_result.is_none(), "unexpected error or panic in sync gossip task: {sync_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -771,6 +773,7 @@ async fn setup( let address_book = AddressBook::new( SocketAddr::from_str("0.0.0.0:0").unwrap(), Mainnet, + DEFAULT_MAX_CONNS_PER_IP, Span::none(), ); let address_book = Arc::new(std::sync::Mutex::new(address_book)); @@ -784,7 +787,7 @@ async fn setup( // Download task panics and timeouts are propagated to the tests that use Groth16 verifiers. 
let (block_verifier, _transaction_verifier, _groth16_download_handle, _max_checkpoint_height) = - zebra_consensus::chain::init( + zebra_consensus::router::init( consensus_config.clone(), network, state_service.clone(), @@ -812,7 +815,7 @@ async fn setup( .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( genesis_block.clone().into(), )) .await @@ -842,7 +845,7 @@ async fn setup( .unwrap(); state_service .clone() - .oneshot(zebra_state::Request::CommitFinalizedBlock( + .oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock( block_one.clone().into(), )) .await diff --git a/zebrad/src/components/inbound/tests/real_peer_set.rs b/zebrad/src/components/inbound/tests/real_peer_set.rs index 6f9df6aa4e3..ac773145966 100644 --- a/zebrad/src/components/inbound/tests/real_peer_set.rs +++ b/zebrad/src/components/inbound/tests/real_peer_set.rs @@ -6,10 +6,7 @@ use futures::FutureExt; use indexmap::IndexSet; use tokio::{sync::oneshot, task::JoinHandle}; use tower::{ - buffer::Buffer, - builder::ServiceBuilder, - util::{BoxCloneService, BoxService}, - ServiceExt, + buffer::Buffer, builder::ServiceBuilder, load_shed::LoadShed, util::BoxService, ServiceExt, }; use zebra_chain::{ @@ -18,9 +15,9 @@ use zebra_chain::{ serialization::ZcashDeserializeInto, transaction::{AuthDigest, Hash as TxHash, Transaction, UnminedTx, UnminedTxId, WtxId}, }; -use zebra_consensus::{chain::VerifyChainError, error::TransactionError, transaction}; +use zebra_consensus::{error::TransactionError, router::RouterError, transaction}; use zebra_network::{ - canonical_peer_addr, connect_isolated_tcp_direct_with_inbound, types::InventoryHash, + canonical_peer_addr, connect_isolated_tcp_direct_with_inbound, types::InventoryHash, CacheDir, Config as NetworkConfig, InventoryResponse, PeerError, Request, Response, SharedPeerError, }; use zebra_node_services::mempool; @@ -108,13 +105,13 @@ async fn inbound_peers_empty_address_book() -> Result<(), crate::BoxError> { let block_gossip_result = block_gossip_task_handle.now_or_never(); assert!( - matches!(block_gossip_result, None), + block_gossip_result.is_none(), "unexpected error or panic in block gossip task: {block_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -191,13 +188,13 @@ async fn inbound_block_empty_state_notfound() -> Result<(), crate::BoxError> { let block_gossip_result = block_gossip_task_handle.now_or_never(); assert!( - matches!(block_gossip_result, None), + block_gossip_result.is_none(), "unexpected error or panic in block gossip task: {block_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -311,13 +308,13 @@ async fn inbound_tx_empty_state_notfound() -> Result<(), crate::BoxError> { let block_gossip_result = block_gossip_task_handle.now_or_never(); assert!( - matches!(block_gossip_result, None), + block_gossip_result.is_none(), "unexpected error or panic in block gossip task: {block_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: 
{tx_gossip_result:?}", ); @@ -461,13 +458,13 @@ async fn outbound_tx_unrelated_response_notfound() -> Result<(), crate::BoxError let block_gossip_result = block_gossip_task_handle.now_or_never(); assert!( - matches!(block_gossip_result, None), + block_gossip_result.is_none(), "unexpected error or panic in block gossip task: {block_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -574,13 +571,13 @@ async fn outbound_tx_partial_response_notfound() -> Result<(), crate::BoxError> let block_gossip_result = block_gossip_task_handle.now_or_never(); assert!( - matches!(block_gossip_result, None), + block_gossip_result.is_none(), "unexpected error or panic in block gossip task: {block_gossip_result:?}", ); let tx_gossip_result = tx_gossip_task_handle.now_or_never(); assert!( - matches!(tx_gossip_result, None), + tx_gossip_result.is_none(), "unexpected error or panic in transaction gossip task: {tx_gossip_result:?}", ); @@ -600,7 +597,12 @@ async fn setup( // connected peer which responds with isolated_peer_response Buffer, // inbound service - BoxCloneService, + LoadShed< + Buffer< + BoxService, + zebra_network::Request, + >, + >, // outbound peer set (only has the connected peer) Buffer< BoxService, @@ -609,7 +611,7 @@ async fn setup( Buffer, mempool::Request>, Buffer, zebra_state::Request>, // mocked services - MockService, + MockService, MockService, // real tasks JoinHandle>, @@ -626,11 +628,11 @@ async fn setup( // Inbound let (setup_tx, setup_rx) = oneshot::channel(); let inbound_service = Inbound::new(MAX_INBOUND_CONCURRENCY, setup_rx); + // TODO: add a timeout just above the service, if needed let inbound_service = ServiceBuilder::new() - .boxed_clone() .load_shed() .buffer(10) - .service(inbound_service); + .service(BoxService::new(inbound_service)); // State // UTXO verification doesn't matter for these tests. 
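The hunk below extends this test-only network configuration so it also skips the new on-disk peer cache. Pieced together as a standalone helper, the isolated config looks roughly like this; all field and type names are taken from the diff itself, and the sketch assumes the `zebra_network` and `indexmap` crates as dependencies:

```rust
use indexmap::IndexSet;
use zebra_network::{CacheDir, Config as NetworkConfig};

/// A network config for isolated tests: no outbound connections,
/// and no peer cache read from (or written to) disk between runs.
fn isolated_test_config() -> NetworkConfig {
    NetworkConfig {
        // Stop Zebra making outbound connections
        initial_mainnet_peers: IndexSet::new(),
        initial_testnet_peers: IndexSet::new(),
        // Don't read or write a peer cache from a previous run
        cache_dir: CacheDir::disabled(),
        ..NetworkConfig::default()
    }
}

fn main() {
    let _config = isolated_test_config();
}
```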
@@ -647,6 +649,7 @@ async fn setup( // Stop Zebra making outbound connections initial_mainnet_peers: IndexSet::new(), initial_testnet_peers: IndexSet::new(), + cache_dir: CacheDir::disabled(), ..NetworkConfig::default() }; diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs index 7415dffe03c..aef623e45fa 100644 --- a/zebrad/src/components/mempool.rs +++ b/zebrad/src/components/mempool.rs @@ -411,41 +411,38 @@ impl Mempool { || self.transaction_cost_bar.is_none() || self.rejected_count_bar.is_none()) { - let max_transaction_count = self.config.tx_cost_limit + let _max_transaction_count = self.config.tx_cost_limit / zebra_chain::transaction::MEMPOOL_TRANSACTION_COST_THRESHOLD; - self.queued_count_bar = Some( - howudoin::new() - .label("Mempool Queue") - .set_pos(0u64) - .set_len( - u64::try_from(downloads::MAX_INBOUND_CONCURRENCY).expect("fits in u64"), - ), - ); - - self.transaction_count_bar = Some( - howudoin::new() - .label("Mempool Txs") - .set_pos(0u64) - .set_len(max_transaction_count), - ); - - self.transaction_cost_bar = Some( - howudoin::new() - .label("Mempool Cost") - .set_pos(0u64) - .set_len(self.config.tx_cost_limit) - .fmt_as_bytes(true), - ); - - self.rejected_count_bar = Some( - howudoin::new() - .label("Mempool Rejects") - .set_pos(0u64) - .set_len( - u64::try_from(storage::MAX_EVICTION_MEMORY_ENTRIES).expect("fits in u64"), - ), - ); + let transaction_count_bar = *howudoin::new_root() + .label("Mempool Transactions") + .set_pos(0u64); + // .set_len(max_transaction_count); + + let transaction_cost_bar = howudoin::new_with_parent(transaction_count_bar.id()) + .label("Mempool Cost") + .set_pos(0u64) + // .set_len(self.config.tx_cost_limit) + .fmt_as_bytes(true); + + let queued_count_bar = *howudoin::new_with_parent(transaction_cost_bar.id()) + .label("Mempool Queue") + .set_pos(0u64); + // .set_len( + // u64::try_from(downloads::MAX_INBOUND_CONCURRENCY).expect("fits in u64"), + // ); + + let rejected_count_bar = *howudoin::new_with_parent(queued_count_bar.id()) + .label("Mempool Rejects") + .set_pos(0u64); + // .set_len( + // u64::try_from(storage::MAX_EVICTION_MEMORY_ENTRIES).expect("fits in u64"), + // ); + + self.transaction_count_bar = Some(transaction_count_bar); + self.transaction_cost_bar = Some(transaction_cost_bar); + self.queued_count_bar = Some(queued_count_bar); + self.rejected_count_bar = Some(rejected_count_bar); } // Update if the mempool has ever been active diff --git a/zebrad/src/components/mempool/gossip.rs b/zebrad/src/components/mempool/gossip.rs index eefa2d53ae4..6d3b2b638bf 100644 --- a/zebrad/src/components/mempool/gossip.rs +++ b/zebrad/src/components/mempool/gossip.rs @@ -16,7 +16,10 @@ use zebra_network::MAX_TX_INV_IN_SENT_MESSAGE; use zebra_network as zn; -use crate::{components::sync::TIPS_RESPONSE_TIMEOUT, BoxError}; +use crate::{ + components::sync::{PEER_GOSSIP_DELAY, TIPS_RESPONSE_TIMEOUT}, + BoxError, +}; /// The maximum number of channel messages we will combine into a single peer broadcast. 
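`MAX_CHANGES_BEFORE_SEND`, defined just below, caps how many queued changes are merged into a single advertisement, while the `PEER_GOSSIP_DELAY` this diff introduces spaces out successive broadcasts. A self-contained sketch of that batch-then-sleep loop follows; the channel type, the `u64` item type, and the `broadcast` stub are assumptions, while the constant values mirror this diff:

```rust
use std::time::Duration;
use tokio::sync::mpsc;

const MAX_CHANGES_BEFORE_SEND: usize = 10;
const PEER_GOSSIP_DELAY: Duration = Duration::from_secs(7);

/// Drain up to MAX_CHANGES_BEFORE_SEND queued items into one broadcast,
/// then pause so successive broadcasts don't overload peers.
async fn gossip_loop(mut rx: mpsc::Receiver<u64>) {
    while let Some(first) = rx.recv().await {
        let mut batch = vec![first];
        // Combine any immediately-available changes into the same batch.
        while batch.len() < MAX_CHANGES_BEFORE_SEND {
            match rx.try_recv() {
                Ok(item) => batch.push(item),
                Err(_) => break,
            }
        }

        broadcast(&batch).await;

        // Rate-limit successive broadcasts to reduce peer load.
        tokio::time::sleep(PEER_GOSSIP_DELAY).await;
    }
}

async fn broadcast(batch: &[u64]) {
    println!("advertising {} items", batch.len());
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(100);
    for i in 0..15u64 {
        tx.send(i).await.unwrap();
    }
    // Close the channel so the loop exits after draining.
    drop(tx);
    gossip_loop(rx).await;
}
```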
pub const MAX_CHANGES_BEFORE_SEND: usize = 10; @@ -96,6 +99,6 @@ where // // in practice, transactions arrive every 1-20 seconds, // so waiting 6 seconds can delay transaction propagation, in order to reduce peer load - tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; } } diff --git a/zebrad/src/components/mempool/storage/verified_set.rs b/zebrad/src/components/mempool/storage/verified_set.rs index e6f0dcbd3f1..1d1f835fb36 100644 --- a/zebrad/src/components/mempool/storage/verified_set.rs +++ b/zebrad/src/components/mempool/storage/verified_set.rs @@ -286,10 +286,110 @@ impl VerifiedSet { } fn update_metrics(&mut self) { + // Track the sum of unpaid actions within each transaction (as they are subject to the + // unpaid action limit). Transactions that have weight >= 1 have no unpaid actions by + // definition. + let mut unpaid_actions_with_weight_lt20pct = 0; + let mut unpaid_actions_with_weight_lt40pct = 0; + let mut unpaid_actions_with_weight_lt60pct = 0; + let mut unpaid_actions_with_weight_lt80pct = 0; + let mut unpaid_actions_with_weight_lt1 = 0; + + // Track the total number of paid actions across all transactions in the mempool. This + // added to the bucketed unpaid actions above is equal to the total number of conventional + // actions in the mempool. + let mut paid_actions = 0; + + // Track the sum of transaction sizes (the metric by which they are mainly limited) across + // several buckets. + let mut size_with_weight_lt1 = 0; + let mut size_with_weight_eq1 = 0; + let mut size_with_weight_gt1 = 0; + let mut size_with_weight_gt2 = 0; + let mut size_with_weight_gt3 = 0; + + for entry in self.full_transactions() { + paid_actions += entry.conventional_actions - entry.unpaid_actions; + + if entry.fee_weight_ratio > 3.0 { + size_with_weight_gt3 += entry.transaction.size; + } else if entry.fee_weight_ratio > 2.0 { + size_with_weight_gt2 += entry.transaction.size; + } else if entry.fee_weight_ratio > 1.0 { + size_with_weight_gt1 += entry.transaction.size; + } else if entry.fee_weight_ratio == 1.0 { + size_with_weight_eq1 += entry.transaction.size; + } else { + size_with_weight_lt1 += entry.transaction.size; + if entry.fee_weight_ratio < 0.2 { + unpaid_actions_with_weight_lt20pct += entry.unpaid_actions; + } else if entry.fee_weight_ratio < 0.4 { + unpaid_actions_with_weight_lt40pct += entry.unpaid_actions; + } else if entry.fee_weight_ratio < 0.6 { + unpaid_actions_with_weight_lt60pct += entry.unpaid_actions; + } else if entry.fee_weight_ratio < 0.8 { + unpaid_actions_with_weight_lt80pct += entry.unpaid_actions; + } else { + unpaid_actions_with_weight_lt1 += entry.unpaid_actions; + } + } + } + + metrics::gauge!( + "zcash.mempool.actions.unpaid", + unpaid_actions_with_weight_lt20pct as f64, + "bk" => "< 0.2", + ); + metrics::gauge!( + "zcash.mempool.actions.unpaid", + unpaid_actions_with_weight_lt40pct as f64, + "bk" => "< 0.4", + ); + metrics::gauge!( + "zcash.mempool.actions.unpaid", + unpaid_actions_with_weight_lt60pct as f64, + "bk" => "< 0.6", + ); + metrics::gauge!( + "zcash.mempool.actions.unpaid", + unpaid_actions_with_weight_lt80pct as f64, + "bk" => "< 0.8", + ); + metrics::gauge!( + "zcash.mempool.actions.unpaid", + unpaid_actions_with_weight_lt1 as f64, + "bk" => "< 1", + ); + metrics::gauge!("zcash.mempool.actions.paid", paid_actions as f64); metrics::gauge!( "zcash.mempool.size.transactions", self.transaction_count() as f64, ); + metrics::gauge!( + "zcash.mempool.size.weighted", + size_with_weight_lt1 as f64, + "bk" => "< 1", 
+ ); + metrics::gauge!( + "zcash.mempool.size.weighted", + size_with_weight_eq1 as f64, + "bk" => "1", + ); + metrics::gauge!( + "zcash.mempool.size.weighted", + size_with_weight_gt1 as f64, + "bk" => "> 1", + ); + metrics::gauge!( + "zcash.mempool.size.weighted", + size_with_weight_gt2 as f64, + "bk" => "> 2", + ); + metrics::gauge!( + "zcash.mempool.size.weighted", + size_with_weight_gt3 as f64, + "bk" => "> 3", + ); metrics::gauge!( "zcash.mempool.size.bytes", self.transactions_serialized_size as f64, diff --git a/zebrad/src/components/mempool/tests/prop.rs b/zebrad/src/components/mempool/tests/prop.rs index 41523013a6b..0b0a35cd19e 100644 --- a/zebrad/src/components/mempool/tests/prop.rs +++ b/zebrad/src/components/mempool/tests/prop.rs @@ -11,7 +11,7 @@ use tower::{buffer::Buffer, util::BoxService}; use zebra_chain::{ block::{self, Block}, - fmt::DisplayToDebug, + fmt::{DisplayToDebug, TypeNameToDebug}, parameters::{Network, NetworkUpgrade}, serialization::ZcashDeserializeInto, transaction::VerifiedUnminedTx, @@ -20,7 +20,7 @@ use zebra_consensus::{error::TransactionError, transaction as tx}; use zebra_network as zn; use zebra_state::{self as zs, ChainTipBlock, ChainTipSender}; use zebra_test::mock_service::{MockService, PropTestAssertion}; -use zs::FinalizedBlock; +use zs::CheckpointVerifiedBlock; use crate::components::{ mempool::{config::Config, Mempool}, @@ -103,7 +103,7 @@ proptest! { network in any::(), mut previous_chain_tip in any::>(), mut transactions in vec(any::>(), 0..CHAIN_LENGTH), - fake_chain_tips in vec(any::>(), 0..CHAIN_LENGTH), + fake_chain_tips in vec(any::>(), 0..CHAIN_LENGTH), ) { let (runtime, _init_guard) = zebra_test::init_async(); @@ -239,7 +239,7 @@ proptest! { fn genesis_chain_tip() -> Option { zebra_test::vectors::BLOCK_MAINNET_GENESIS_BYTES .zcash_deserialize_into::>() - .map(FinalizedBlock::from) + .map(CheckpointVerifiedBlock::from) .map(ChainTipBlock::from) .ok() } @@ -247,7 +247,7 @@ fn genesis_chain_tip() -> Option { fn block1_chain_tip() -> Option { zebra_test::vectors::BLOCK_MAINNET_1_BYTES .zcash_deserialize_into::>() - .map(FinalizedBlock::from) + .map(CheckpointVerifiedBlock::from) .map(ChainTipBlock::from) .ok() } diff --git a/zebrad/src/components/mempool/tests/vector.rs b/zebrad/src/components/mempool/tests/vector.rs index 4dbed9426bb..60e70674309 100644 --- a/zebrad/src/components/mempool/tests/vector.rs +++ b/zebrad/src/components/mempool/tests/vector.rs @@ -412,7 +412,7 @@ async fn mempool_cancel_mined() -> Result<(), Report> { .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( block1.clone().into(), )) .await @@ -457,7 +457,7 @@ async fn mempool_cancel_mined() -> Result<(), Report> { // Push block 2 to the state state_service - .oneshot(zebra_state::Request::CommitFinalizedBlock( + .oneshot(zebra_state::Request::CommitCheckpointVerifiedBlock( block2.clone().into(), )) .await @@ -545,7 +545,7 @@ async fn mempool_cancel_downloads_after_network_upgrade() -> Result<(), Report> .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( block1.clone().into(), )) .await @@ -822,7 +822,7 @@ async fn mempool_reverifies_after_tip_change() -> Result<(), Report> { .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( block1.clone().into(), )) .await @@ -882,7 +882,7 @@ async fn 
mempool_reverifies_after_tip_change() -> Result<(), Report> { .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( block2.clone().into(), )) .await @@ -955,7 +955,7 @@ async fn setup( .ready() .await .unwrap() - .call(zebra_state::Request::CommitFinalizedBlock( + .call(zebra_state::Request::CommitCheckpointVerifiedBlock( genesis_block.clone().into(), )) .await diff --git a/zebrad/src/components/sync.rs b/zebrad/src/components/sync.rs index 8679f66d6b4..89ee01fb34b 100644 --- a/zebrad/src/components/sync.rs +++ b/zebrad/src/components/sync.rs @@ -8,7 +8,7 @@ use color_eyre::eyre::{eyre, Report}; use futures::stream::{FuturesUnordered, StreamExt}; use indexmap::IndexSet; use serde::{Deserialize, Serialize}; -use tokio::{sync::watch, time::sleep}; +use tokio::{sync::watch, task::JoinError, time::sleep}; use tower::{ builder::ServiceBuilder, hedge::Hedge, limit::ConcurrencyLimit, retry::Retry, timeout::Timeout, Service, ServiceExt, @@ -107,6 +107,16 @@ pub const MAX_TIPS_RESPONSE_HASH_COUNT: usize = 500; /// failure loop. pub const TIPS_RESPONSE_TIMEOUT: Duration = Duration::from_secs(6); +/// Controls how long we wait between gossiping successive blocks or transactions. +/// +/// ## Correctness +/// +/// If this timeout is set too high, blocks and transactions won't propagate through +/// the network efficiently. +/// +/// If this timeout is set too low, the peer set and remote peers can get overloaded. +pub const PEER_GOSSIP_DELAY: Duration = Duration::from_secs(7); + /// Controls how long we wait for a block download request to complete. /// /// This timeout makes sure that the syncer doesn't hang when: @@ -658,7 +668,17 @@ where let mut download_set = IndexSet::new(); while let Some(res) = requests.next().await { match res - .expect("panic in spawned obtain tips request") + .unwrap_or_else(|e @ JoinError { .. }| { + if e.is_panic() { + panic!("panic in obtain tips task: {e:?}"); + } else { + info!( + "task error during obtain tips task: {e:?},\ + is Zebra shutting down?" + ); + Err(e.into()) + } + }) .map_err::(|e| eyre!(e)) { Ok(zn::Response::BlockHashes(hashes)) => { diff --git a/zebrad/src/components/sync/downloads.rs b/zebrad/src/components/sync/downloads.rs index fde200f7fc8..f43d8005280 100644 --- a/zebrad/src/components/sync/downloads.rs +++ b/zebrad/src/components/sync/downloads.rs @@ -122,7 +122,7 @@ pub enum BlockDownloadVerifyError { #[error("block failed consensus validation: {error:?} {height:?} {hash:?}")] Invalid { #[source] - error: zebra_consensus::chain::VerifyChainError, + error: zebra_consensus::router::RouterError, height: block::Height, hash: block::Hash, }, @@ -543,7 +543,7 @@ where verification .map(|hash| (block_height, hash)) .map_err(|err| { - match err.downcast::() { + match err.downcast::() { Ok(error) => BlockDownloadVerifyError::Invalid { error: *error, height: block_height, hash }, Err(error) => BlockDownloadVerifyError::ValidationRequestError { error, height: block_height, hash }, } diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index 89317c61d94..cd4a0f35660 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -12,8 +12,8 @@ use zebra_chain::{ use crate::application::release_version; -/// The estimated height that this release started to run. -pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_081_448; +/// The estimated height that this release will be published. 
+pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_165_000; /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run /// without halting. diff --git a/zebrad/src/components/sync/gossip.rs b/zebrad/src/components/sync/gossip.rs index a6fcb3b49d5..9cb02c6529f 100644 --- a/zebrad/src/components/sync/gossip.rs +++ b/zebrad/src/components/sync/gossip.rs @@ -9,9 +9,10 @@ use tower::{timeout::Timeout, Service, ServiceExt}; use zebra_network as zn; use zebra_state::ChainTipChange; -use crate::BoxError; - -use super::{SyncStatus, TIPS_RESPONSE_TIMEOUT}; +use crate::{ + components::sync::{SyncStatus, PEER_GOSSIP_DELAY, TIPS_RESPONSE_TIMEOUT}, + BoxError, +}; use BlockGossipError::*; @@ -90,6 +91,6 @@ where // // in practice, we expect blocks to arrive approximately every 75 seconds, // so waiting 6 seconds won't make much difference - tokio::time::sleep(TIPS_RESPONSE_TIMEOUT).await; + tokio::time::sleep(PEER_GOSSIP_DELAY).await; } } diff --git a/zebrad/src/components/sync/progress.rs b/zebrad/src/components/sync/progress.rs index 9ab272a2ca8..66acfb082c7 100644 --- a/zebrad/src/components/sync/progress.rs +++ b/zebrad/src/components/sync/progress.rs @@ -1,8 +1,12 @@ //! Progress tracking for blockchain syncing. -use std::{cmp::min, ops::Add, time::Duration}; +use std::{ + cmp::min, + ops::Add, + time::{Duration, Instant}, +}; -use chrono::{TimeZone, Utc}; +use chrono::Utc; use num_integer::div_ceil; use zebra_chain::{ @@ -118,17 +122,15 @@ pub async fn show_block_chain_progress( let mut last_state_change_height = Height(0); // The last time we logged an update. - // Initialised with the unix epoch, to simplify the code while still staying in the std range. - let mut last_log_time = Utc - .timestamp_opt(0, 0) - .single() - .expect("in-range number of seconds and valid nanosecond"); + let mut last_log_time = Instant::now(); #[cfg(feature = "progress-bar")] let block_bar = howudoin::new().label("Blocks"); loop { let now = Utc::now(); + let instant_now = Instant::now(); + let is_syncer_stopped = sync_status.is_close_to_tip(); if let Some(estimated_height) = @@ -142,26 +144,28 @@ pub async fn show_block_chain_progress( let network_upgrade = NetworkUpgrade::current(network, current_height); // Send progress reports for block height + // + // TODO: split the progress bar height update into its own function. #[cfg(feature = "progress-bar")] if matches!(howudoin::cancelled(), Some(true)) { block_bar.close(); } else { block_bar .set_pos(current_height.0) - .set_len(u64::from(estimated_height.0)) - .desc(network_upgrade.to_string()); + .set_len(u64::from(estimated_height.0)); } - // Skip logging if it isn't time for it yet - let elapsed_since_log = (now - last_log_time) - .to_std() - .expect("elapsed times are in range"); + // Skip logging and status updates if it isn't time for them yet. + let elapsed_since_log = instant_now.saturating_duration_since(last_log_time); if elapsed_since_log < LOG_INTERVAL { + tokio::time::sleep(PROGRESS_BAR_INTERVAL).await; continue; } else { - last_log_time = now; + last_log_time = instant_now; } + // TODO: split logging / status updates into their own function. + // Work out the sync progress towards the estimated tip. 
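Switching `last_log_time` from a `chrono` wall-clock timestamp to `std::time::Instant` in the hunk above makes the log throttling monotonic: `saturating_duration_since` can't go backwards or panic if the system clock is adjusted. A minimal sketch of that pattern, where the 60-second `LOG_INTERVAL` is an assumed value for illustration:

```rust
use std::time::{Duration, Instant};

const LOG_INTERVAL: Duration = Duration::from_secs(60);

struct ProgressLogger {
    last_log_time: Instant,
}

impl ProgressLogger {
    fn new() -> Self {
        ProgressLogger {
            last_log_time: Instant::now(),
        }
    }

    /// Returns true when enough time has passed to emit another log line.
    fn should_log(&mut self) -> bool {
        let now = Instant::now();
        // `saturating_duration_since` returns zero if `now` is somehow
        // earlier than the last log time, instead of panicking.
        if now.saturating_duration_since(self.last_log_time) >= LOG_INTERVAL {
            self.last_log_time = now;
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut logger = ProgressLogger::new();
    assert!(!logger.should_log(), "too soon to log right after startup");
}
```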
let sync_progress = f64::from(current_height.0) / f64::from(estimated_height.0); let sync_percent = format!( @@ -212,7 +216,7 @@ pub async fn show_block_chain_progress( // TODO: use add_warn(), but only add each warning once #[cfg(feature = "progress-bar")] - block_bar.desc("chain updates have stalled"); + block_bar.desc(format!("{}: sync has stalled", network_upgrade)); } else if is_syncer_stopped && remaining_sync_blocks > MIN_SYNC_WARNING_BLOCKS { // We've stopped syncing blocks, but we estimate we're a long way from the tip. // @@ -230,7 +234,10 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc("sync is very slow, or estimated tip is wrong"); + block_bar.desc(format!( + "{}: sync is very slow, or estimated tip is wrong", + network_upgrade + )); } else if is_syncer_stopped && current_height <= after_checkpoint_height { // We've stopped syncing blocks, // but we're below the minimum height estimated from our checkpoints. @@ -254,7 +261,7 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc("sync is very slow"); + block_bar.desc(format!("{}: sync is very slow", network_upgrade)); } else if is_syncer_stopped { // We've stayed near the tip for a while, and we've stopped syncing lots of blocks. // So we're mostly using gossiped blocks now. @@ -268,7 +275,7 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc(format!("{}: initial sync finished", network_upgrade)); + block_bar.desc(format!("{}: waiting for next block", network_upgrade)); } else if remaining_sync_blocks <= MAX_CLOSE_TO_TIP_BLOCKS { // We estimate we're near the tip, but we have been syncing lots of blocks recently. // We might also be using some gossiped blocks. @@ -283,7 +290,7 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc(format!("{}: initial sync almost finished", network_upgrade)); + block_bar.desc(format!("{}: finishing initial sync", network_upgrade)); } else { // We estimate we're far from the tip, and we've been syncing lots of blocks. info!( @@ -294,9 +301,14 @@ pub async fn show_block_chain_progress( %time_since_last_state_block, "estimated progress to chain tip", ); + + #[cfg(feature = "progress-bar")] + block_bar.desc(format!("{}: syncing blocks", network_upgrade)); } } else { let sync_percent = format!("{:.SYNC_PERCENT_FRAC_DIGITS$} %", 0.0f64,); + #[cfg(feature = "progress-bar")] + let network_upgrade = NetworkUpgrade::Genesis; if is_syncer_stopped { // We've stopped syncing blocks, @@ -310,7 +322,7 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc("can't download genesis block"); + block_bar.desc(format!("{}: can't download genesis block", network_upgrade)); } else { // We're waiting for the genesis block to be committed to the state, // before we can estimate the best chain tip. 
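The `sync_percent` calculation above reduces to a small pure helper. This sketch assumes `SYNC_PERCENT_FRAC_DIGITS = 3` (the constant's value is not shown in the diff) and uses the same named-constant precision capture in `format!` as the code above:

```rust
const SYNC_PERCENT_FRAC_DIGITS: usize = 3;

/// The fraction of the estimated tip height reached, as a fixed-precision percentage.
fn sync_percent(current_height: u32, estimated_height: u32) -> String {
    let sync_progress = f64::from(current_height) / f64::from(estimated_height);
    // `SYNC_PERCENT_FRAC_DIGITS$` reads the precision from the named constant.
    format!("{:.SYNC_PERCENT_FRAC_DIGITS$} %", sync_progress * 100.0)
}

fn main() {
    assert_eq!(sync_percent(1_082_500, 2_165_000), "50.000 %");
}
```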
@@ -321,7 +333,10 @@ pub async fn show_block_chain_progress( ); #[cfg(feature = "progress-bar")] - block_bar.desc("waiting to download genesis block"); + block_bar.desc(format!( + "{}: waiting to download genesis block", + network_upgrade + )); } } diff --git a/zebrad/src/components/sync/tests/vectors.rs b/zebrad/src/components/sync/tests/vectors.rs index 940f4c27f4d..3a656904ef0 100644 --- a/zebrad/src/components/sync/tests/vectors.rs +++ b/zebrad/src/components/sync/tests/vectors.rs @@ -45,7 +45,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut chain_verifier, + mut block_verifier_router, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -88,7 +88,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { .await .respond(zn::Response::Blocks(vec![Available(block0.clone())])); - chain_verifier + block_verifier_router .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -96,7 +96,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for genesis again state_service @@ -144,7 +144,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 1 & 2) in response order state_service @@ -174,7 +174,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 1..=2 { - chain_verifier + block_verifier_router .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -186,7 +186,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // ChainSync::extend_tips @@ -217,7 +217,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { } // Check that nothing unexpected happened. - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // Blocks 3 & 4 are fetched in order, then verified concurrently @@ -238,7 +238,7 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 3..=4 { - chain_verifier + block_verifier_router .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -250,12 +250,12 @@ async fn sync_blocks_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. 
- chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( - matches!(chain_sync_result, None), + chain_sync_result.is_none(), "unexpected error or panic in chain sync task: {chain_sync_result:?}", ); @@ -272,7 +272,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut chain_verifier, + mut block_verifier_router, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -315,7 +315,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { .await .respond(zn::Response::Blocks(vec![Available(block0.clone())])); - chain_verifier + block_verifier_router .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -323,7 +323,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for genesis again state_service @@ -373,7 +373,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 1 & 2) in response order state_service @@ -403,7 +403,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 1..=2 { - chain_verifier + block_verifier_router .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -415,7 +415,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // ChainSync::extend_tips @@ -448,7 +448,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { } // Check that nothing unexpected happened. - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // Blocks 3 & 4 are fetched in order, then verified concurrently @@ -469,7 +469,7 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { .collect(); for _ in 3..=4 { - chain_verifier + block_verifier_router .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -481,12 +481,12 @@ async fn sync_blocks_duplicate_hashes_ok() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. 
- chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( - matches!(chain_sync_result, None), + chain_sync_result.is_none(), "unexpected error or panic in chain sync task: {chain_sync_result:?}", ); @@ -500,7 +500,7 @@ async fn sync_block_lookahead_drop() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut chain_verifier, + mut block_verifier_router, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -535,11 +535,11 @@ async fn sync_block_lookahead_drop() -> Result<(), crate::BoxError> { // Block is dropped because it is too far ahead of the tip. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( - matches!(chain_sync_result, None), + chain_sync_result.is_none(), "unexpected error or panic in chain sync task: {chain_sync_result:?}", ); @@ -555,7 +555,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut chain_verifier, + mut block_verifier_router, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -597,7 +597,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { .await .respond(zn::Response::Blocks(vec![Available(block0.clone())])); - chain_verifier + block_verifier_router .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -605,7 +605,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. // We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for genesis again state_service @@ -654,7 +654,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 982k, 1, 2) in response order state_service @@ -694,7 +694,7 @@ async fn sync_block_too_high_obtain_tips() -> Result<(), crate::BoxError> { let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( - matches!(chain_sync_result, None), + chain_sync_result.is_none(), "unexpected error or panic in chain sync task: {chain_sync_result:?}", ); @@ -710,7 +710,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { let ( chain_sync_future, _sync_status, - mut chain_verifier, + mut block_verifier_router, mut peer_set, mut state_service, _mock_chain_tip_sender, @@ -758,7 +758,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { .await .respond(zn::Response::Blocks(vec![Available(block0.clone())])); - chain_verifier + block_verifier_router .expect_request(zebra_consensus::Request::Commit(block0)) .await .respond(block0_hash); @@ -766,7 +766,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. 
// We expect more requests to the state service, because the syncer keeps on running. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for genesis again state_service @@ -814,7 +814,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { // Check that nothing unexpected happened. peer_set.expect_no_requests().await; - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; // State is checked for all non-tip blocks (blocks 1 & 2) in response order state_service @@ -844,7 +844,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { .collect(); for _ in 1..=2 { - chain_verifier + block_verifier_router .expect_request_that(|req| remaining_blocks.remove(&req.block().hash()).is_some()) .await .respond_with(|req| req.block().hash()); @@ -856,7 +856,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { ); // Check that nothing unexpected happened. - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // ChainSync::extend_tips @@ -888,7 +888,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { } // Check that nothing unexpected happened. - chain_verifier.expect_no_requests().await; + block_verifier_router.expect_no_requests().await; state_service.expect_no_requests().await; // Blocks 3, 4, 982k are fetched in order, then verified concurrently, @@ -915,7 +915,7 @@ async fn sync_block_too_high_extend_tips() -> Result<(), crate::BoxError> { let chain_sync_result = chain_sync_task_handle.now_or_never(); assert!( - matches!(chain_sync_result, None), + chain_sync_result.is_none(), "unexpected error or panic in chain sync task: {chain_sync_result:?}", ); @@ -926,7 +926,7 @@ fn setup() -> ( // ChainSync impl Future> + Send, SyncStatus, - // ChainVerifier + // BlockVerifierRouter MockService, // PeerSet MockService, @@ -951,7 +951,7 @@ fn setup() -> ( .with_max_request_delay(MAX_SERVICE_REQUEST_DELAY) .for_unit_tests(); - let chain_verifier = MockService::build() + let block_verifier_router = MockService::build() .with_max_request_delay(MAX_SERVICE_REQUEST_DELAY) .for_unit_tests(); @@ -965,7 +965,7 @@ fn setup() -> ( &config, Height(0), peer_set.clone(), - chain_verifier.clone(), + block_verifier_router.clone(), state_service.clone(), mock_chain_tip, ); @@ -975,7 +975,7 @@ fn setup() -> ( ( chain_sync_future, sync_status, - chain_verifier, + block_verifier_router, peer_set, state_service, mock_chain_tip_sender, diff --git a/zebrad/src/components/tokio.rs b/zebrad/src/components/tokio.rs index f4225bebd57..802ebc31052 100644 --- a/zebrad/src/components/tokio.rs +++ b/zebrad/src/components/tokio.rs @@ -83,7 +83,7 @@ impl RuntimeRun for Runtime { } Err(error) => { warn!(?error, "shutting down Zebra due to an error"); - app_writer().shutdown(Shutdown::Forced); + APPLICATION.shutdown(Shutdown::Forced); } } } diff --git a/zebrad/src/components/tracing.rs b/zebrad/src/components/tracing.rs index 3aa27f4c756..439c5052e49 100644 --- a/zebrad/src/components/tracing.rs +++ b/zebrad/src/components/tracing.rs @@ -131,6 +131,27 @@ pub struct Config { pub use_journald: bool, } +impl Config { + /// Returns `true` if standard output should use color escapes. + /// Automatically checks if Zebra is running in a terminal. 
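+    ///
+    /// For example (illustrative only), forcing color on overrides terminal
+    /// detection:
+    ///
+    /// ```ignore
+    /// let config = Config { force_use_color: true, ..Config::default() };
+    /// assert!(config.use_color_stdout());
+    /// ```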
+ pub fn use_color_stdout(&self) -> bool { + self.force_use_color || (self.use_color && atty::is(atty::Stream::Stdout)) + } + + /// Returns `true` if standard error should use color escapes. + /// Automatically checks if Zebra is running in a terminal. + pub fn use_color_stderr(&self) -> bool { + self.force_use_color || (self.use_color && atty::is(atty::Stream::Stderr)) + } + + /// Returns `true` if output that could go to standard output or standard error + /// should use color escapes. Automatically checks if Zebra is running in a terminal. + pub fn use_color_stdout_and_stderr(&self) -> bool { + self.force_use_color + || (self.use_color && atty::is(atty::Stream::Stdout) && atty::is(atty::Stream::Stderr)) + } +} + impl Default for Config { fn default() -> Self { #[cfg(feature = "progress-bar")] diff --git a/zebrad/src/components/tracing/component.rs b/zebrad/src/components/tracing/component.rs index 0cf0456ee6d..745315ae16a 100644 --- a/zebrad/src/components/tracing/component.rs +++ b/zebrad/src/components/tracing/component.rs @@ -15,12 +15,34 @@ use tracing_subscriber::{ util::SubscriberInitExt, EnvFilter, }; +use zebra_chain::parameters::Network; -use crate::{application::app_version, components::tracing::Config}; +use crate::{application::build_version, components::tracing::Config}; #[cfg(feature = "flamegraph")] use super::flame; +// Art generated with these two images. +// Zebra logo: book/theme/favicon.png +// License: MIT or Apache 2.0 +// +// Heart image: https://commons.wikimedia.org/wiki/File:Love_Heart_SVG.svg +// Author: Bubinator +// License: Public Domain or Unconditional Use +// +// How to render +// +// Convert heart image to PNG (2000px): +// curl -o heart.svg https://upload.wikimedia.org/wikipedia/commons/4/42/Love_Heart_SVG.svg +// cargo install resvg +// resvg --width 2000 --height 2000 heart.svg heart.png +// +// Then to text (40x20): +// img2txt -W 40 -H 20 -f utf8 -d none heart.png > heart.utf8 +// img2txt -W 40 -H 20 -f utf8 -d none favicon.png > logo.utf8 +// paste -d "\0" logo.utf8 heart.utf8 > zebra.utf8 +static ZEBRA_ART: [u8; include_bytes!("zebra.utf8").len()] = *include_bytes!("zebra.utf8"); + /// A type-erased boxed writer that can be sent between threads safely. pub type BoxWrite = Box; @@ -47,19 +69,52 @@ pub struct Tracing { /// responsible for flushing any remaining logs when the program terminates. // // Correctness: must be listed last in the struct, so it drops after other drops have logged. - _guard: WorkerGuard, + _guard: Option, } impl Tracing { - /// Try to create a new [`Tracing`] component with the given `filter`. - #[allow(clippy::print_stdout, clippy::print_stderr)] - pub fn new(config: Config) -> Result { + /// Try to create a new [`Tracing`] component with the given `config`. + /// + /// If `uses_intro` is true, show a welcome message, the `network`, + /// and the Zebra logo on startup. (If the terminal supports it.) + // + // This method should only print to stderr, because stdout is for tracing logs. + #[allow(clippy::print_stderr, clippy::unwrap_in_result)] + pub fn new(network: Network, config: Config, uses_intro: bool) -> Result { + // Only use color if tracing output is being sent to a terminal or if it was explicitly + // forced to. 
+ let use_color = config.use_color_stdout(); + let use_color_stderr = config.use_color_stderr(); + let filter = config.filter.unwrap_or_default(); let flame_root = &config.flamegraph; - let writer = if let Some(log_file) = config.log_file.as_ref() { - println!("running zebra"); + // Only show the intro for user-focused node server commands like `start` + if uses_intro { + // If it's a terminal and color escaping is enabled: clear screen and + // print Zebra logo (here `use_color` is being interpreted as + // "use escape codes") + if use_color_stderr { + // Clear screen + eprint!("\x1B[2J"); + eprintln!( + "{}", + std::str::from_utf8(&ZEBRA_ART) + .expect("should always work on a UTF-8 encoded constant") + ); + } + + eprintln!( + "Thank you for running a {} zebrad {} node!", + network.lowercase_name(), + build_version() + ); + eprintln!( + "You're helping to strengthen the network and contributing to a social good :)" + ); + } + let writer = if let Some(log_file) = config.log_file.as_ref() { // Make sure the directory for the log file exists. // If the log is configured in the current directory, it won't have a parent directory. // @@ -74,16 +129,18 @@ impl Tracing { let log_file_dir = log_file.parent(); if let Some(log_file_dir) = log_file_dir { if !log_file_dir.exists() { - println!("directory for log file {log_file:?} does not exist, trying to create it..."); + eprintln!("Directory for log file {log_file:?} does not exist, trying to create it..."); if let Err(create_dir_error) = fs::create_dir_all(log_file_dir) { - println!("failed to create directory for log file: {create_dir_error}"); - println!("trying log file anyway..."); + eprintln!("Failed to create directory for log file: {create_dir_error}"); + eprintln!("Trying log file anyway..."); } } } - println!("sending logs to {log_file:?}..."); + if uses_intro { + eprintln!("Sending logs to {log_file:?}..."); + } let log_file = File::options().append(true).create(true).open(log_file)?; Box::new(log_file) as BoxWrite } else { @@ -94,15 +151,10 @@ impl Tracing { // Builds a lossy NonBlocking logger with a default line limit of 128_000 or an explicit buffer_limit. // The write method queues lines down a bounded channel with this capacity to a worker thread that writes to stdout. // Increments error_counter and drops lines when the buffer is full. - let (non_blocking, _guard) = NonBlockingBuilder::default() + let (non_blocking, worker_guard) = NonBlockingBuilder::default() .buffered_lines_limit(config.buffer_limit.max(100)) .finish(writer); - // Only use color if tracing output is being sent to a terminal or if it was explicitly - // forced to. - let use_color = - config.force_use_color || (config.use_color && atty::is(atty::Stream::Stdout)); - // Construct a format subscriber with the supplied global logging filter, // and optionally enable reloading. // @@ -264,10 +316,6 @@ impl Tracing { howudoin::init(terminal_consumer); info!("activated progress bar"); - - if config.log_file.is_some() { - eprintln!("waiting for initial progress reports..."); - } } Ok(Self { @@ -275,10 +323,21 @@ impl Tracing { initial_filter: filter, #[cfg(feature = "flamegraph")] flamegrapher, - _guard, + _guard: Some(worker_guard), }) } + /// Drops guard for worker thread of non-blocking logger, + /// to flush any remaining logs when the program terminates. + pub fn shutdown(&mut self) { + self.filter_handle.take(); + + #[cfg(feature = "flamegraph")] + self.flamegrapher.take(); + + self._guard.take(); + } + /// Return the currently-active tracing filter. 
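    ///
    /// For example (illustrative), the returned string uses `EnvFilter`
    /// directive syntax, such as `"info"` or `"info,zebrad=debug"`.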
pub fn filter(&self) -> String { if let Some(filter_handle) = self.filter_handle.as_ref() { @@ -330,7 +389,7 @@ impl Component for Tracing { } fn version(&self) -> abscissa_core::Version { - app_version() + build_version() } fn before_shutdown(&self, _kind: Shutdown) -> Result<(), FrameworkError> { diff --git a/zebrad/src/components/tracing/endpoint.rs b/zebrad/src/components/tracing/endpoint.rs index 2bd95059246..c4c6440def8 100644 --- a/zebrad/src/components/tracing/endpoint.rs +++ b/zebrad/src/components/tracing/endpoint.rs @@ -120,9 +120,9 @@ To set the filter, POST the new filter string to /filter: (&Method::GET, "/filter") => Response::builder() .status(StatusCode::OK) .body(Body::from( - app_reader() + APPLICATION .state() - .components + .components() .get_downcast_ref::() .expect("Tracing component should be available") .filter(), @@ -130,9 +130,9 @@ To set the filter, POST the new filter string to /filter: .expect("response with known status code cannot fail"), (&Method::POST, "/filter") => match read_filter(req).await { Ok(filter) => { - app_reader() + APPLICATION .state() - .components + .components() .get_downcast_ref::() .expect("Tracing component should be available") .reload_filter(filter); diff --git a/zebrad/src/components/tracing/zebra.utf8 b/zebrad/src/components/tracing/zebra.utf8 new file mode 100644 index 00000000000..18b39c533c5 --- /dev/null +++ b/zebrad/src/components/tracing/zebra.utf8 @@ -0,0 +1,20 @@ + X@8:::::::@X  S888@t S888% + @::X:;SX;%8S8@@:X::8 S: .8 @. .@ + @:;@8@.S8 ;;t;. XX.;@@;:@ ; % ; X + 8:@88 .%@.S@.XS: 888:8 8 t% @ + :;SX XS8;::@X@8::;8t8: 8;;:@    + @::@ 88:::8 8::88::::::X@ 8::8 @ 8 + @:XS8 ;::::::8.88%8@S88::::8: 8S8:8 X X + ::;8 t8S8::::@St 88SX @SS@8:8S;8S @@:: 8 8 +8:XX %%@@::X% 8: X@ t:8t X ..%t8 ;8:8   +::8t: tX:X8tS 8: :@ ;.8@.X tt888: @8:: X % +::8S: @:8Xtt% 8: S 8 8S.   .8;::. 88:: % t +::X8 S@8 8:8.@8@8Xt S  .%8;S %8:: 8 8 + ::@8 S .  @:::::::X;@ 8.88S8 @8;:X % % + 8:X8X .@:X88:::::::::888 @;:X %88:: % % + @::S 888@8:::::::::8X; 8 8::8 8 8 + X:;S@ tXX8;::::::::S::@8 8;S:8 S. :t + ::8S@ :S88XX@8X: @XX:: % % + 8:;%X8.:S t; X8%S:8X @ S + X8::8SS8SX8@@X@8;8S:8X S % + X@8:::::::@XS % diff --git a/zebrad/src/prelude.rs b/zebrad/src/prelude.rs index b4d6eb835a6..b537590734e 100644 --- a/zebrad/src/prelude.rs +++ b/zebrad/src/prelude.rs @@ -2,7 +2,7 @@ //! which are generally useful and should be available everywhere. /// Application state accessors -pub use crate::application::{app_config, app_reader, app_writer}; +pub use crate::application::APPLICATION; /// Commonly used Abscissa traits pub use abscissa_core::{Application, Command, Runnable}; diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 81952672cf7..a6fab9d5ec0 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -136,6 +136,7 @@ //! 
``` use std::{ + cmp::Ordering, collections::HashSet, env, fs, panic, path::PathBuf, @@ -146,6 +147,7 @@ use color_eyre::{ eyre::{eyre, Result, WrapErr}, Help, }; +use semver::Version; use zebra_chain::{ block::{self, Height}, @@ -153,7 +155,7 @@ use zebra_chain::{ }; use zebra_network::constants::PORT_IN_USE_ERROR; use zebra_node_services::rpc_client::RpcRequestClient; -use zebra_state::constants::LOCK_FILE_ERROR; +use zebra_state::{constants::LOCK_FILE_ERROR, database_format_version_in_code}; use zebra_test::{args, command::ContextFrom, net::random_known_port, prelude::*}; @@ -165,7 +167,10 @@ use common::{ config::{ config_file_full_path, configs_dir, default_test_config, persistent_test_config, testdir, }, - launch::{spawn_zebrad_for_rpc, ZebradTestDirExt, BETWEEN_NODES_DELAY, LAUNCH_DELAY}, + launch::{ + spawn_zebrad_for_rpc, spawn_zebrad_without_rpc, ZebradTestDirExt, BETWEEN_NODES_DELAY, + EXTENDED_LAUNCH_DELAY, LAUNCH_DELAY, + }, lightwalletd::{can_spawn_lightwalletd_for_rpc, spawn_lightwalletd_for_rpc}, sync::{ create_cached_database_height, sync_until, MempoolBehavior, LARGE_CHECKPOINT_TEST_HEIGHT, @@ -264,11 +269,11 @@ fn help_no_args() -> Result<()> { is_zebrad_version, &output.output.stdout, "stdout", - "a valid zebrad semantic version", + "are valid zebrad semantic versions", )?; - // Make sure we are in help by looking usage string - output.stdout_line_contains("USAGE:")?; + // Make sure we are in help by looking for the usage string + output.stdout_line_contains("Usage:")?; Ok(()) } @@ -371,6 +376,7 @@ async fn db_init_outside_future_executor() -> Result<()> { Ok(()) } +/// Check that the block state and peer list caches are written to disk. #[test] fn persistent_mode() -> Result<()> { let _init_guard = zebra_test::init(); @@ -381,7 +387,7 @@ fn persistent_mode() -> Result<()> { let mut child = testdir.spawn_child(args!["-v", "start"])?; // Run the program and kill it after a few seconds - std::thread::sleep(LAUNCH_DELAY); + std::thread::sleep(EXTENDED_LAUNCH_DELAY); child.kill(false)?; let output = child.wait_with_output()?; @@ -395,6 +401,13 @@ fn persistent_mode() -> Result<()> { "state directory empty despite persistent state config" ); + let cache_dir = testdir.path().join("network"); + assert_with_context!( + cache_dir.read_dir()?.count() > 0, + &output, + "network directory empty despite persistent network config" + ); + Ok(()) } @@ -424,6 +437,9 @@ fn misconfigured_ephemeral_missing_directory() -> Result<()> { ) } +/// Check that the state directory created on disk matches the state config. +/// +/// TODO: do a similar test for `network.cache_dir` #[tracing::instrument] fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) -> Result<()> { use std::io::ErrorKind; @@ -449,7 +465,7 @@ fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) .with_config(&mut config)? 
.spawn_child(args!["start"])?; // Run the program and kill it after a few seconds - std::thread::sleep(LAUNCH_DELAY); + std::thread::sleep(EXTENDED_LAUNCH_DELAY); child.kill(false)?; let output = child.wait_with_output()?; @@ -472,7 +488,7 @@ fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) ignored_cache_dir.read_dir().unwrap().collect::>() ); - ["state", "zebrad.toml"].iter() + ["state", "network", "zebrad.toml"].iter() } // we didn't create the state directory, so it should not exist @@ -490,7 +506,7 @@ fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) ignored_cache_dir.read_dir().unwrap().collect::>() ); - ["zebrad.toml"].iter() + ["network", "zebrad.toml"].iter() } }; @@ -522,7 +538,7 @@ fn version_no_args() -> Result<()> { let testdir = testdir()?.with_config(&mut default_test_config()?)?; - let child = testdir.spawn_child(args!["version"])?; + let child = testdir.spawn_child(args!["--version"])?; let output = child.wait_with_output()?; let output = output.assert_success()?; @@ -544,15 +560,23 @@ fn version_args() -> Result<()> { let testdir = testdir()?.with_config(&mut default_test_config()?)?; let testdir = &testdir; - // unexpected free argument `argument` - let child = testdir.spawn_child(args!["version", "argument"])?; + // unrecognized option `-f` + let child = testdir.spawn_child(args!["tip-height", "-f"])?; let output = child.wait_with_output()?; output.assert_failure()?; - // unrecognized option `-f` - let child = testdir.spawn_child(args!["version", "-f"])?; + // unrecognized option `-f` is ignored + let child = testdir.spawn_child(args!["--version", "-f"])?; let output = child.wait_with_output()?; - output.assert_failure()?; + let output = output.assert_success()?; + + // The output should only contain the version + output.output_check( + is_zebrad_version, + &output.output.stdout, + "stdout", + "a valid zebrad semantic version", + )?; Ok(()) } @@ -572,8 +596,8 @@ fn config_tests() -> Result<()> { // Check that we have a current version of the config stored last_config_is_stored()?; - // Check that Zebra stored configuration works - stored_configs_works()?; + // Check that Zebra's previous configurations still work + stored_configs_work()?; // Runs `zebrad` serially to avoid potential port conflicts app_no_args()?; @@ -702,13 +726,31 @@ fn last_config_is_stored() -> Result<()> { .to_string(); // Loop all the stored configs + // + // TODO: use the same filename list code in last_config_is_stored() and stored_configs_work() for config_file in configs_dir() .read_dir() .expect("read_dir call failed") .flatten() { + let config_file_path = config_file.path(); + let config_file_name = config_file_path + .file_name() + .expect("config files must have a file name") + .to_string_lossy(); + + if config_file_name.as_ref().starts_with('.') || config_file_name.as_ref().starts_with('#') + { + // Skip editor files and other invalid config paths + tracing::info!( + ?config_file_path, + "skipping hidden/temporary config file path" + ); + continue; + } + // Read stored config - let stored_content = fs::read_to_string(config_file_full_path(config_file.path())) + let stored_content = fs::read_to_string(config_file_full_path(config_file_path)) .expect("Should have been able to read the file") .trim() .to_string(); @@ -736,7 +778,7 @@ fn last_config_is_stored() -> Result<()> { Or run: \n\ cargo build {}--bin zebrad && \n\ zebrad generate | \n\ - sed \"s/cache_dir = '.*'/cache_dir = 'cache_dir'/\" > \n\ + sed 's/cache_dir = 
\".*\"/cache_dir = \"cache_dir\"/' > \n\ zebrad/tests/common/configs/{}.toml", if cfg!(feature = "getblocktemplate-rpcs") { GET_BLOCK_TEMPLATE_CONFIG_PREFIX @@ -832,7 +874,7 @@ fn invalid_generated_config() -> Result<()> { /// Test all versions of `zebrad.toml` we have stored can be parsed by the latest `zebrad`. #[tracing::instrument] -fn stored_configs_works() -> Result<()> { +fn stored_configs_work() -> Result<()> { let old_configs_dir = configs_dir(); for config_file in old_configs_dir @@ -840,15 +882,33 @@ fn stored_configs_works() -> Result<()> { .expect("read_dir call failed") .flatten() { + let config_file_path = config_file.path(); + let config_file_name = config_file_path + .file_name() + .expect("config files must have a file name") + .to_string_lossy(); + + if config_file_name.as_ref().starts_with('.') || config_file_name.as_ref().starts_with('#') + { + // Skip editor files and other invalid config paths + tracing::info!( + ?config_file_path, + "skipping hidden/temporary config file path" + ); + continue; + } + // ignore files starting with getblocktemplate prefix // if we were not built with the getblocktemplate-rpcs feature. #[cfg(not(feature = "getblocktemplate-rpcs"))] - if config_file - .file_name() - .into_string() - .expect("all files names should be string convertible") + if config_file_name + .as_ref() .starts_with(GET_BLOCK_TEMPLATE_CONFIG_PREFIX) { + tracing::info!( + ?config_file_path, + "skipping getblocktemplate-rpcs config file path" + ); continue; } @@ -1038,7 +1098,8 @@ fn sync_large_checkpoints_mempool_mainnet() -> Result<()> { #[tracing::instrument] fn create_cached_database(network: Network) -> Result<()> { let height = network.mandatory_checkpoint_height(); - let checkpoint_stop_regex = format!("{STOP_AT_HEIGHT_REGEX}.*CommitFinalized request"); + let checkpoint_stop_regex = + format!("{STOP_AT_HEIGHT_REGEX}.*commit checkpoint-verified request"); create_cached_database_height( network, @@ -1056,7 +1117,7 @@ fn create_cached_database(network: Network) -> Result<()> { fn sync_past_mandatory_checkpoint(network: Network) -> Result<()> { let height = network.mandatory_checkpoint_height() + 1200; let full_validation_stop_regex = - format!("{STOP_AT_HEIGHT_REGEX}.*best non-finalized chain root"); + format!("{STOP_AT_HEIGHT_REGEX}.*commit contextually-verified request"); create_cached_database_height( network, @@ -1408,6 +1469,88 @@ async fn rpc_endpoint(parallel_cpu_threads: bool) -> Result<()> { Ok(()) } +/// Test that the JSON-RPC endpoint responds to requests with different content types. +/// +/// This test ensures that the curl examples of zcashd rpc methods will also work in Zebra. +/// +/// https://zcash.github.io/rpc/getblockchaininfo.html +#[tokio::test] +async fn rpc_endpoint_client_content_type() -> Result<()> { + let _init_guard = zebra_test::init(); + if zebra_test::net::zebra_skip_network_tests() { + return Ok(()); + } + + // Write a configuration that has RPC listen_addr set + // [Note on port conflict](#Note on port conflict) + let mut config = random_known_rpc_port_config(true)?; + + let dir = testdir()?.with_config(&mut config)?; + let mut child = dir.spawn_child(args!["start"])?; + + // Wait until port is open. + child.expect_stdout_line_matches( + format!("Opened RPC endpoint at {}", config.rpc.listen_addr.unwrap()).as_str(), + )?; + + // Create an http client + let client = RpcRequestClient::new(config.rpc.listen_addr.unwrap()); + + // Call to `getinfo` RPC method with a no content type. 
+    let res = client
+        .call_with_no_content_type("getinfo", "[]".to_string())
+        .await?;
+
+    // Zebra will insert a valid `application/json` content type and succeed.
+    assert!(res.status().is_success());
+
+    // Call the `getinfo` RPC method with a `text/plain` content type.
+    let res = client
+        .call_with_content_type("getinfo", "[]".to_string(), "text/plain".to_string())
+        .await?;
+
+    // Zebra will replace it with the valid `application/json` content type and succeed.
+    assert!(res.status().is_success());
+
+    // Call the `getinfo` RPC method with a `text/plain;` content type, as in the zcashd RPC docs.
+    let res = client
+        .call_with_content_type("getinfo", "[]".to_string(), "text/plain;".to_string())
+        .await?;
+
+    // Zebra will replace it with the valid `application/json` content type and succeed.
+    assert!(res.status().is_success());
+
+    // Call the `getinfo` RPC method with a `text/plain; other string` content type.
+    let res = client
+        .call_with_content_type(
+            "getinfo",
+            "[]".to_string(),
+            "text/plain; other string".to_string(),
+        )
+        .await?;
+
+    // Zebra will replace it with the valid `application/json` content type and succeed.
+    assert!(res.status().is_success());
+
+    // Call the `getinfo` RPC method with a valid `application/json` content type.
+    let res = client
+        .call_with_content_type("getinfo", "[]".to_string(), "application/json".to_string())
+        .await?;
+
+    // Zebra will not replace the valid content type, and the request will succeed.
+    assert!(res.status().is_success());
+
+    // Call the `getinfo` RPC method with an invalid string as the content type.
+    let res = client
+        .call_with_content_type("getinfo", "[]".to_string(), "whatever".to_string())
+        .await?;
+
+    // Zebra will not replace the unrecognized content type, and the request will fail.
+    assert!(res.status().is_client_error());
+
+    Ok(())
+}
+
/// Test that Zebra's non-blocking logger works, by creating lots of debug output, but not reading the logs.
/// Then make sure Zebra drops excess log lines. (Previously, it would block waiting for logs to be read.)
///
@@ -1688,10 +1831,11 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> {
    }

    if test_type.needs_lightwalletd_cached_state() {
-        lightwalletd.expect_stdout_line_matches("Found [0-9]{7} blocks in cache")?;
+        lightwalletd
+            .expect_stdout_line_matches("Done reading [0-9]{7} blocks from disk cache")?;
    } else if !test_type.allow_lightwalletd_cached_state() {
        // Timeout the test if we're somehow accidentally using a cached state in our temp dir
-        lightwalletd.expect_stdout_line_matches("Found 0 blocks in cache")?;
+        lightwalletd.expect_stdout_line_matches("Done reading 0 blocks from disk cache")?;
    }

    // getblock with the first Sapling block in Zebra's state
@@ -1948,7 +2092,7 @@ fn zebra_state_conflict() -> Result<()> {
        dir_conflict_full.push("state");
        dir_conflict_full.push(format!(
            "v{}",
-            zebra_state::constants::DATABASE_FORMAT_VERSION
+            zebra_state::database_format_version_in_code().major,
        ));
        dir_conflict_full.push(config.network.network.to_string().to_lowercase());
        format!(
@@ -2240,6 +2384,7 @@ fn end_of_support_is_checked_at_start() -> Result<()> {
    Ok(())
}
+
/// Test `zebra-checkpoints` on mainnet.
///
/// If you want to run this test individually, see the module documentation.
@@ -2262,3 +2407,199 @@ async fn generate_checkpoints_mainnet() -> Result<()> {
    common::checkpoints::run(Testnet).await
}
+
+/// Check that new states are created with the current state format version,
+/// and that restarting `zebrad` doesn't change the format version.
+#[tokio::test] +async fn new_state_format() -> Result<()> { + for network in [Mainnet, Testnet] { + state_format_test("new_state_format_test", network, 2, None).await?; + } + + Ok(()) +} + +/// Check that outdated states are updated to the current state format version, +/// and that restarting `zebrad` doesn't change the updated format version. +/// +/// TODO: test partial updates, once we have some updates that take a while. +/// (or just add a delay during tests) +#[tokio::test] +async fn update_state_format() -> Result<()> { + let mut fake_version = database_format_version_in_code(); + fake_version.minor = 0; + fake_version.patch = 0; + + for network in [Mainnet, Testnet] { + state_format_test("update_state_format_test", network, 3, Some(&fake_version)).await?; + } + + Ok(()) +} + +/// Check that newer state formats are downgraded to the current state format version, +/// and that restarting `zebrad` doesn't change the format version. +/// +/// Future version compatibility is a best-effort attempt, this test can be disabled if it fails. +#[tokio::test] +async fn downgrade_state_format() -> Result<()> { + let mut fake_version = database_format_version_in_code(); + fake_version.minor = u16::MAX.into(); + fake_version.patch = 0; + + for network in [Mainnet, Testnet] { + state_format_test( + "downgrade_state_format_test", + network, + 3, + Some(&fake_version), + ) + .await?; + } + + Ok(()) +} + +/// Test state format changes, see calling tests for details. +async fn state_format_test( + base_test_name: &str, + network: Network, + reopen_count: usize, + fake_version: Option<&Version>, +) -> Result<()> { + let _init_guard = zebra_test::init(); + + let test_name = &format!("{base_test_name}/new"); + + // # Create a new state and check it has the current version + + let zebrad = spawn_zebrad_without_rpc(network, test_name, false, false, None, false)?; + + // Skip the test unless it has the required state and environmental variables. + let Some(mut zebrad) = zebrad else { + return Ok(()); + }; + + tracing::info!(?network, "running {test_name} using zebrad"); + + zebrad.expect_stdout_line_matches("creating new database with the current format")?; + zebrad.expect_stdout_line_matches("loaded Zebra state cache")?; + + // Give Zebra enough time to actually write the database to disk. + tokio::time::sleep(Duration::from_secs(1)).await; + + let logs = zebrad.kill_and_return_output(false)?; + + assert!( + !logs.contains("marked database format as upgraded"), + "unexpected format upgrade in logs:\n\ + {logs}" + ); + assert!( + !logs.contains("marked database format as downgraded"), + "unexpected format downgrade in logs:\n\ + {logs}" + ); + + let output = zebrad.wait_with_output()?; + let mut output = output.assert_failure()?; + + let mut dir = output + .take_dir() + .expect("dir should not already have been taken"); + + // [Note on port conflict](#Note on port conflict) + output + .assert_was_killed() + .wrap_err("Possible port conflict. 
Are there other acceptance tests running?")?; + + // # Apply the fake version if needed + let mut expect_older_version = false; + let mut expect_newer_version = false; + + if let Some(fake_version) = fake_version { + let test_name = &format!("{base_test_name}/apply_fake_version/{fake_version}"); + tracing::info!(?network, "running {test_name} using zebra-state"); + + let mut config = UseAnyState + .zebrad_config(test_name, false, Some(dir.path())) + .expect("already checked config")?; + config.network.network = network; + + zebra_state::write_database_format_version_to_disk(fake_version, &config.state, network) + .expect("can't write fake database version to disk"); + + // Give zebra_state enough time to actually write the database version to disk. + tokio::time::sleep(Duration::from_secs(1)).await; + + let running_version = database_format_version_in_code(); + + match fake_version.cmp(&running_version) { + Ordering::Less => expect_older_version = true, + Ordering::Equal => {} + Ordering::Greater => expect_newer_version = true, + } + } + + // # Reopen that state and check the version hasn't changed + + for reopened in 0..reopen_count { + let test_name = &format!("{base_test_name}/reopen/{reopened}"); + + if reopened > 0 { + expect_older_version = false; + expect_newer_version = false; + } + + let mut zebrad = spawn_zebrad_without_rpc(network, test_name, false, false, dir, false)? + .expect("unexpectedly missing required state or env vars"); + + tracing::info!(?network, "running {test_name} using zebrad"); + + if expect_older_version { + zebrad.expect_stdout_line_matches("trying to open older database format")?; + zebrad.expect_stdout_line_matches("marked database format as upgraded")?; + zebrad.expect_stdout_line_matches("database is fully upgraded")?; + } else if expect_newer_version { + zebrad.expect_stdout_line_matches("trying to open newer database format")?; + zebrad.expect_stdout_line_matches("marked database format as downgraded")?; + } else { + zebrad.expect_stdout_line_matches("trying to open current database format")?; + zebrad.expect_stdout_line_matches("loaded Zebra state cache")?; + } + + // Give Zebra enough time to actually write the database to disk. + tokio::time::sleep(Duration::from_secs(1)).await; + + let logs = zebrad.kill_and_return_output(false)?; + + if !expect_older_version { + assert!( + !logs.contains("marked database format as upgraded"), + "unexpected format upgrade in logs:\n\ + {logs}" + ); + } + + if !expect_newer_version { + assert!( + !logs.contains("marked database format as downgraded"), + "unexpected format downgrade in logs:\n\ + {logs}" + ); + } + + let output = zebrad.wait_with_output()?; + let mut output = output.assert_failure()?; + + dir = output + .take_dir() + .expect("dir should not already have been taken"); + + // [Note on port conflict](#Note on port conflict) + output + .assert_was_killed() + .wrap_err("Possible port conflict. 
Are there other acceptance tests running?")?; + } + Ok(()) +} diff --git a/zebrad/tests/common/cached_state.rs b/zebrad/tests/common/cached_state.rs index 432e7ae5322..284add1c0df 100644 --- a/zebrad/tests/common/cached_state.rs +++ b/zebrad/tests/common/cached_state.rs @@ -11,8 +11,6 @@ use std::{ }; use color_eyre::eyre::{eyre, Result}; -use tempfile::TempDir; -use tokio::fs; use tower::{util::BoxService, Service}; use zebra_chain::{ @@ -25,7 +23,6 @@ use zebra_node_services::rpc_client::RpcRequestClient; use zebra_state::{ChainTipChange, LatestChainTip, MAX_BLOCK_REORG_HEIGHT}; use crate::common::{ - config::testdir, launch::spawn_zebrad_for_rpc, sync::{check_sync_logs_until, MempoolBehavior, SYNC_FINISHED_REGEX}, test_type::TestType, @@ -78,83 +75,6 @@ pub async fn load_tip_height_from_state_directory( Ok(chain_tip_height) } -/// Recursively copy a chain state database directory into a new temporary directory. -pub async fn copy_state_directory(network: Network, source: impl AsRef) -> Result { - // Copy the database files for this state and network, excluding testnet and other state versions - let source = source.as_ref(); - let state_config = zebra_state::Config { - cache_dir: source.into(), - ..Default::default() - }; - let source_net_dir = state_config.db_path(network); - let source_net_dir = source_net_dir.as_path(); - let state_suffix = source_net_dir - .strip_prefix(source) - .expect("db_path() is a subdirectory"); - - let destination = testdir()?; - let destination_net_dir = destination.path().join(state_suffix); - - tracing::info!( - ?source, - ?source_net_dir, - ?state_suffix, - ?destination, - ?destination_net_dir, - "copying cached state files (this may take some time)...", - ); - - let mut remaining_directories = vec![PathBuf::from(source_net_dir)]; - - while let Some(directory) = remaining_directories.pop() { - let sub_directories = - copy_directory(&directory, source_net_dir, destination_net_dir.as_ref()).await?; - - remaining_directories.extend(sub_directories); - } - - Ok(destination) -} - -/// Copy the contents of a directory, and return the sub-directories it contains. -/// -/// Copies all files from the `directory` into the destination specified by the concatenation of -/// the `base_destination_path` and `directory` stripped of its `prefix`. -#[tracing::instrument] -async fn copy_directory( - directory: &Path, - prefix: &Path, - base_destination_path: &Path, -) -> Result> { - let mut sub_directories = Vec::new(); - let mut entries = fs::read_dir(directory).await?; - - let destination = - base_destination_path.join(directory.strip_prefix(prefix).expect("Invalid path prefix")); - - fs::create_dir_all(&destination).await?; - - while let Some(entry) = entries.next_entry().await? 
{ - let entry_path = entry.path(); - let file_type = entry.file_type().await?; - - if file_type.is_file() { - let file_name = entry_path.file_name().expect("Missing file name"); - let destination_path = destination.join(file_name); - - fs::copy(&entry_path, destination_path).await?; - } else if file_type.is_dir() { - sub_directories.push(entry_path); - } else if file_type.is_symlink() { - unimplemented!("Symbolic link support is currently not necessary"); - } else { - panic!("Unknown file type"); - } - } - - Ok(sub_directories) -} - /// Accepts a network, test_type, test_name, and num_blocks (how many blocks past the finalized tip to try getting) /// /// Syncs zebra until the tip, gets some blocks near the tip, via getblock rpc calls, diff --git a/zebrad/tests/common/checkpoints.rs b/zebrad/tests/common/checkpoints.rs index b083d2126ca..cc5e6be40f9 100644 --- a/zebrad/tests/common/checkpoints.rs +++ b/zebrad/tests/common/checkpoints.rs @@ -303,7 +303,7 @@ impl ZebraCheckpointsTestDirExt for TempDir { let zebra_checkpoints = self.spawn_child_with_command(zebra_checkpoints_path, args.clone()); - let Err(system_path_error) = zebra_checkpoints else { + let Err(system_path_error) = zebra_checkpoints else { return zebra_checkpoints; }; diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml new file mode 100644 index 00000000000..c6629087b72 --- /dev/null +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.0-rc.9.toml @@ -0,0 +1,74 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. 
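+#
+# For example, to try a custom config without touching the preference
+# directory (a hypothetical session, shown only for illustration):
+#
+#   zebrad generate > custom.toml
+#   zebrad -c custom.toml start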
+ +[consensus] +checkpoint_sync = true +debug_skip_parameter_preload = false + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false + diff --git a/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml b/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml new file mode 100644 index 00000000000..3536c80c9c8 --- /dev/null +++ b/zebrad/tests/common/configs/getblocktemplate-v1.0.1.toml @@ -0,0 +1,75 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. 
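+#
+# For example (a hypothetical override, not part of the generated defaults),
+# an isolated node can be created by emptying both seed peer lists:
+#
+#   initial_mainnet_peers = []
+#   initial_testnet_peers = []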
+ +[consensus] +checkpoint_sync = true +debug_skip_parameter_preload = false + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[mining] +debug_like_zcashd = true + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 0 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false + diff --git a/zebrad/tests/common/configs/net-cache-custom-v1.0.0-rc.9.toml b/zebrad/tests/common/configs/net-cache-custom-v1.0.0-rc.9.toml new file mode 100644 index 00000000000..30ac9d46c1f --- /dev/null +++ b/zebrad/tests/common/configs/net-cache-custom-v1.0.0-rc.9.toml @@ -0,0 +1,16 @@ +# Custom network.cache_dir config parsing test + +[network] +# Enable the peer address cache with a custom path +cache_dir = "/tmp" + +# Use a custom seed peer config +# https://en.wikipedia.org/wiki/IPv6_address#Documentation +initial_mainnet_peers = [ + "192.0.2.0:8233", + "2001:db8::0:8233", +] +initial_testnet_peers = [ + "192.0.2.1:18233", + "2001:db8::1:18233", +] diff --git a/zebrad/tests/common/configs/net-cache-disabled-v1.0.0-rc.9.toml b/zebrad/tests/common/configs/net-cache-disabled-v1.0.0-rc.9.toml new file mode 100644 index 00000000000..c2d33eecc0e --- /dev/null +++ b/zebrad/tests/common/configs/net-cache-disabled-v1.0.0-rc.9.toml @@ -0,0 +1,9 @@ +# Custom network.cache_dir config parsing test + +[network] +# Disable the peer address cache +cache_dir = false + +# Disable seed peers as well, to create an isolated node +initial_mainnet_peers = [] +initial_testnet_peers = [] diff --git a/zebrad/tests/common/configs/net-cache-enabled-v1.0.0-rc.9.toml b/zebrad/tests/common/configs/net-cache-enabled-v1.0.0-rc.9.toml new file mode 100644 index 00000000000..40e119be9b3 --- /dev/null +++ b/zebrad/tests/common/configs/net-cache-enabled-v1.0.0-rc.9.toml @@ -0,0 +1,18 @@ +# Custom network.cache_dir config parsing test + +[network] +# Enable the peer address cache with the default path +cache_dir = true + +# Use the default seed peer config +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] diff --git a/zebrad/tests/common/configs/v1.0.0-rc.9.toml b/zebrad/tests/common/configs/v1.0.0-rc.9.toml new file mode 100644 index 00000000000..52cd503be0b --- /dev/null +++ b/zebrad/tests/common/configs/v1.0.0-rc.9.toml @@ -0,0 +1,71 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. 
+# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. + +[consensus] +checkpoint_sync = true +debug_skip_parameter_preload = false + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 1 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false + diff --git a/zebrad/tests/common/configs/v1.0.1.toml b/zebrad/tests/common/configs/v1.0.1.toml new file mode 100644 index 00000000000..02bac53da62 --- /dev/null +++ b/zebrad/tests/common/configs/v1.0.1.toml @@ -0,0 +1,71 @@ +# Default configuration for zebrad. +# +# This file can be used as a skeleton for custom configs. +# +# Unspecified fields use default values. Optional fields are Some(field) if the +# field is present and None if it is absent. +# +# This file is generated as an example using zebrad's current defaults. +# You should set only the config options you want to keep, and delete the rest. +# Only a subset of fields are present in the skeleton, since optional values +# whose default is None are omitted. +# +# The config format (including a complete list of sections and fields) is +# documented here: +# https://doc.zebra.zfnd.org/zebrad/config/struct.ZebradConfig.html +# +# zebrad attempts to load configs in the following order: +# +# 1. The -c flag on the command line, e.g., `zebrad -c myconfig.toml start`; +# 2. The file `zebrad.toml` in the users's preference directory (platform-dependent); +# 3. The default config. 
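+#
+# For example (illustrative, based on the net-cache-* configs above):
+# `cache_dir = false` disables the peer address cache entirely, while a
+# string value like `cache_dir = "/tmp"` stores it at a custom path.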
+ +[consensus] +checkpoint_sync = true +debug_skip_parameter_preload = false + +[mempool] +eviction_memory_time = "1h" +tx_cost_limit = 80000000 + +[metrics] + +[network] +cache_dir = true +crawl_new_peer_interval = "1m 1s" +initial_mainnet_peers = [ + "dnsseed.z.cash:8233", + "dnsseed.str4d.xyz:8233", + "mainnet.seeder.zfnd.org:8233", + "mainnet.is.yolo.money:8233", +] +initial_testnet_peers = [ + "dnsseed.testnet.z.cash:18233", + "testnet.seeder.zfnd.org:18233", + "testnet.is.yolo.money:18233", +] +listen_addr = "0.0.0.0:8233" +max_connections_per_ip = 1 +network = "Mainnet" +peerset_initial_target_size = 25 + +[rpc] +debug_force_finished_sync = false +parallel_cpu_threads = 1 + +[state] +cache_dir = "cache_dir" +delete_old_database = true +ephemeral = false + +[sync] +checkpoint_verify_concurrency_limit = 1000 +download_concurrency_limit = 50 +full_verify_concurrency_limit = 20 +parallel_cpu_threads = 0 + +[tracing] +buffer_limit = 128000 +force_use_color = false +use_color = true +use_journald = false diff --git a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs index 78631f66bfb..e6bd3d3d9c4 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_block_template.rs @@ -21,7 +21,7 @@ use zebra_rpc::methods::get_block_template_rpcs::{ }; use crate::common::{ - launch::{can_spawn_zebrad_for_rpc, spawn_zebrad_for_rpc}, + launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, sync::{check_sync_logs_until, MempoolBehavior, SYNC_FINISHED_REGEX}, test_type::TestType, }; @@ -66,7 +66,7 @@ pub(crate) async fn run() -> Result<()> { let network = Network::Mainnet; // Skip the test unless the user specifically asked for it and there is a zebrad_state_path - if !can_spawn_zebrad_for_rpc(test_name, test_type) { + if !can_spawn_zebrad_for_test_type(test_name, test_type, true) { return Ok(()); } @@ -106,7 +106,15 @@ pub(crate) async fn run() -> Result<()> { .await?; let is_response_success = getblocktemplate_response.status().is_success(); - let response_text = getblocktemplate_response.text().await?; + + let mut response_text = getblocktemplate_response.text().await?; + // This string can be extremely long in logs. + if response_text.len() > 1003 { + let end = response_text.len() - 500; + // Replace the middle bytes with "...", but leave 500 bytes on either side. + // The response text is ascii, so this replacement won't panic. 
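+            // (Worked example, illustrative: for a 2,000-byte response,
+            // end = 2000 - 500 = 1500, so bytes 500..=1500 collapse to "..."
+            // and roughly 500 bytes survive on each side.)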
+ response_text.replace_range(500..=end, "..."); + } tracing::info!( response_text, diff --git a/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs b/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs index 30dbe7db3d1..5dd0fd81604 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs @@ -7,7 +7,7 @@ use zebra_node_services::rpc_client::RpcRequestClient; use zebra_rpc::methods::get_block_template_rpcs::types::peer_info::PeerInfo; use crate::common::{ - launch::{can_spawn_zebrad_for_rpc, spawn_zebrad_for_rpc}, + launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, test_type::TestType, }; @@ -21,7 +21,7 @@ pub(crate) async fn run() -> Result<()> { let network = Network::Mainnet; // Skip the test unless the user specifically asked for it and there is a zebrad_state_path - if !can_spawn_zebrad_for_rpc(test_name, test_type) { + if !can_spawn_zebrad_for_test_type(test_name, test_type, true) { return Ok(()); } @@ -29,7 +29,7 @@ pub(crate) async fn run() -> Result<()> { let (mut zebrad, zebra_rpc_address) = spawn_zebrad_for_rpc(network, test_name, test_type, true)? - .expect("Already checked zebra state path with can_spawn_zebrad_for_rpc"); + .expect("Already checked zebra state path with can_spawn_zebrad_for_test_type"); let rpc_address = zebra_rpc_address.expect("getpeerinfo test must have RPC port"); diff --git a/zebrad/tests/common/get_block_template_rpcs/submit_block.rs b/zebrad/tests/common/get_block_template_rpcs/submit_block.rs index 571ffa14f04..8e606606389 100644 --- a/zebrad/tests/common/get_block_template_rpcs/submit_block.rs +++ b/zebrad/tests/common/get_block_template_rpcs/submit_block.rs @@ -15,7 +15,7 @@ use zebra_node_services::rpc_client::RpcRequestClient; use crate::common::{ cached_state::get_raw_future_blocks, - launch::{can_spawn_zebrad_for_rpc, spawn_zebrad_for_rpc}, + launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc}, test_type::TestType, }; @@ -31,7 +31,7 @@ pub(crate) async fn run() -> Result<()> { let network = Network::Mainnet; // Skip the test unless the user specifically asked for it and there is a zebrad_state_path - if !can_spawn_zebrad_for_rpc(test_name, test_type) { + if !can_spawn_zebrad_for_test_type(test_name, test_type, true) { return Ok(()); } @@ -50,7 +50,7 @@ pub(crate) async fn run() -> Result<()> { let should_sync = false; let (mut zebrad, zebra_rpc_address) = spawn_zebrad_for_rpc(network, test_name, test_type, should_sync)? 
diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs
index ac8590ccba1..315a7e3cb6a 100644
--- a/zebrad/tests/common/launch.rs
+++ b/zebrad/tests/common/launch.rs
@@ -7,16 +7,17 @@
 use std::{
     env,
+    fmt::Debug,
     net::SocketAddr,
     path::{Path, PathBuf},
     time::Duration,
 };
 
 use color_eyre::eyre::Result;
-use indexmap::IndexSet;
 use tempfile::TempDir;
 
 use zebra_chain::parameters::Network;
+use zebra_network::CacheDir;
 use zebra_test::{
     args,
     command::{Arguments, TestDirExt},
@@ -36,6 +37,10 @@ use crate::common::{
 /// metrics or tracing test failures in Windows CI.
 pub const LAUNCH_DELAY: Duration = Duration::from_secs(15);
 
+/// After we launch `zebrad`, wait this long in extended tests.
+/// See [`LAUNCH_DELAY`] for details.
+pub const EXTENDED_LAUNCH_DELAY: Duration = Duration::from_secs(45);
+
 /// After we launch `lightwalletd`, wait this long for the command to start up,
 /// take the actions expected by the quick tests, and log the expected logs.
 ///
@@ -167,9 +172,16 @@ where
     }
 
     fn cache_config_update_helper(self, config: &mut ZebradConfig) -> Result<Self> {
+        let dir = self.as_ref();
+        let cache_dir = PathBuf::from(dir);
+
+        // If the peer cache has already been disabled, don't re-enable it
+        if config.network.cache_dir.is_enabled() {
+            config.network.cache_dir = CacheDir::custom_path(&cache_dir);
+        }
+
+        // Only replace the state cache directory if it's going to be used
         if !config.state.ephemeral {
-            let dir = self.as_ref();
-            let cache_dir = PathBuf::from(dir);
             config.state.cache_dir = cache_dir;
         }
 
@@ -209,7 +221,7 @@ where
 ///
 /// `zebra_rpc_address` is `None` if the test type doesn't need an RPC port.
 #[tracing::instrument]
-pub fn spawn_zebrad_for_rpc<S: AsRef<str> + std::fmt::Debug>(
+pub fn spawn_zebrad_for_rpc<S: AsRef<str> + Debug>(
     network: Network,
     test_name: S,
     test_type: TestType,
@@ -218,23 +230,16 @@ pub fn spawn_zebrad_for_rpc<S: AsRef<str> + std::fmt::Debug>(
     let test_name = test_name.as_ref();
 
     // Skip the test unless the user specifically asked for it
-    if !can_spawn_zebrad_for_rpc(test_name, test_type) {
+    if !can_spawn_zebrad_for_test_type(test_name, test_type, use_internet_connection) {
         return Ok(None);
     }
 
     // Get the zebrad config
     let mut config = test_type
-        .zebrad_config(test_name)
+        .zebrad_config(test_name, use_internet_connection, None)
        .expect("already checked config")?;
 
-    // TODO: move this into zebrad_config()
     config.network.network = network;
-    if !use_internet_connection {
-        config.network.initial_mainnet_peers = IndexSet::new();
-        config.network.initial_testnet_peers = IndexSet::new();
-
-        config.mempool.debug_enable_at_height = Some(0);
-    }
 
     let (zebrad_failure_messages, zebrad_ignore_messages) = test_type.zebrad_failure_messages();
@@ -250,13 +255,90 @@
     Ok(Some((zebrad, config.rpc.listen_addr)))
 }
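`cache_config_update_helper` now points the peer cache at the per-test directory unless a test already disabled it. A minimal stand-in mirroring only the `CacheDir` calls used here (`is_enabled`, `custom_path`, `disabled`); the real `zebra_network::CacheDir` type may differ:

```rust
use std::path::{Path, PathBuf};

/// Simplified stand-in for `zebra_network::CacheDir`.
#[derive(Clone, Debug)]
enum CacheDir {
    Enabled,             // use the default peer cache location
    CustomPath(PathBuf), // use a custom peer cache directory
    Disabled,            // never read or write a peer cache
}

impl CacheDir {
    fn is_enabled(&self) -> bool {
        !matches!(self, CacheDir::Disabled)
    }
    fn custom_path(path: &Path) -> Self {
        CacheDir::CustomPath(path.to_owned())
    }
    fn disabled() -> Self {
        CacheDir::Disabled
    }
}

fn main() {
    let mut cache_dir = CacheDir::Enabled;
    // If the peer cache has already been disabled, don't re-enable it;
    // otherwise point it at the test's temporary directory.
    if cache_dir.is_enabled() {
        cache_dir = CacheDir::custom_path(Path::new("/tmp/zebrad-test"));
    }
    let offline = CacheDir::disabled();
    println!("{cache_dir:?} {offline:?}");
}
```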
+/// Spawns a zebrad instance on `network` without RPCs or `lightwalletd`.
+///
+/// If `use_cached_state` is `true`, then update the cached state to the tip.
+/// If `ephemeral` is `true`, then use an ephemeral state path.
+/// If `reuse_state_path` is `Some(path)`, then use the state at that path, and take ownership of
+/// the temporary directory, so it isn't deleted until the test ends.
+/// Otherwise, just create an empty state in this test's new temporary directory.
+///
+/// If `use_internet_connection` is `false` then spawn, but without any peers.
+/// This prevents it from downloading blocks. Instead, use the `ZEBRA_CACHED_STATE_DIR`
+/// environmental variable to provide an initial state to the zebrad instance.
+///
+/// Returns:
+/// - `Ok(Some(zebrad))` on success,
+/// - `Ok(None)` if the test doesn't have the required network or cached state, and
+/// - `Err(_)` if spawning zebrad fails.
+#[tracing::instrument]
+pub fn spawn_zebrad_without_rpc<Str, Dir>(
+    network: Network,
+    test_name: Str,
+    use_cached_state: bool,
+    ephemeral: bool,
+    reuse_state_path: Dir,
+    use_internet_connection: bool,
+) -> Result<Option<TestChild<TempDir>>>
+where
+    Str: AsRef<str> + Debug,
+    Dir: Into<Option<TempDir>> + Debug,
+{
+    use TestType::*;
+
+    let test_name = test_name.as_ref();
+
+    let reuse_state_path = reuse_state_path.into();
+    let testdir = reuse_state_path
+        .unwrap_or_else(|| testdir().expect("failed to create test temporary directory"));
+
+    let (test_type, replace_cache_dir) = if use_cached_state {
+        (UpdateZebraCachedStateNoRpc, None)
+    } else if ephemeral {
+        (
+            LaunchWithEmptyState {
+                launches_lightwalletd: false,
+            },
+            None,
+        )
+    } else {
+        (UseAnyState, Some(testdir.path()))
+    };
+
+    // Skip the test unless the user specifically asked for it
+    if !can_spawn_zebrad_for_test_type(test_name, test_type, use_internet_connection) {
+        return Ok(None);
+    }
+
+    // Get the zebrad config
+    let mut config = test_type
+        .zebrad_config(test_name, use_internet_connection, replace_cache_dir)
+        .expect("already checked config")?;
+
+    config.network.network = network;
+
+    let (zebrad_failure_messages, zebrad_ignore_messages) = test_type.zebrad_failure_messages();
+
+    // Writes a configuration that does not have RPC listen_addr set.
+    // If the state path env var is set, uses it in the config.
+    let zebrad = testdir
+        .with_exact_config(&config)?
+        .spawn_child(args!["start"])?
+        .bypass_test_capture(true)
+        .with_timeout(test_type.zebrad_timeout())
+        .with_failure_regex_iter(zebrad_failure_messages, zebrad_ignore_messages);
+
+    Ok(Some(zebrad))
+}
+
 /// Returns `true` if a zebrad test for `test_type` has everything it needs to run.
 #[tracing::instrument]
-pub fn can_spawn_zebrad_for_rpc<S: AsRef<str> + std::fmt::Debug>(
+pub fn can_spawn_zebrad_for_test_type<S: AsRef<str> + Debug>(
     test_name: S,
     test_type: TestType,
+    use_internet_connection: bool,
 ) -> bool {
-    if zebra_test::net::zebra_skip_network_tests() {
+    if use_internet_connection && zebra_test::net::zebra_skip_network_tests() {
         return false;
     }
 
@@ -267,8 +349,9 @@ pub fn can_spawn_zebrad_for_rpc<S: AsRef<str> + std::fmt::Debug>(
         return false;
     }
 
-    // Check if we have any necessary cached states for the zebrad config
-    test_type.zebrad_config(test_name).is_some()
+    // Check if we have any necessary cached states for the zebrad config.
+    // The cache_dir value doesn't matter here.
+    test_type.zebrad_config(test_name, true, None).is_some()
 }
 
 /// Panics if `$pred` is false, with an error report containing:
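The `reuse_state_path.unwrap_or_else(...)` line is what lets callers either hand over an existing state directory or get a throwaway one. A runnable sketch of that ownership pattern, using the same `tempfile` crate as the tests (`reuse_or_create` is our name, not Zebra's):

```rust
use tempfile::TempDir;

/// Reuse the caller's temporary directory, or create a fresh one.
fn reuse_or_create(reuse_state_path: Option<TempDir>) -> TempDir {
    // Taking ownership keeps the caller's directory alive until the test
    // ends; a newly created one is deleted when it is dropped.
    reuse_state_path
        .unwrap_or_else(|| TempDir::new().expect("failed to create test temporary directory"))
}

fn main() {
    let dir = reuse_or_create(None);
    println!("state dir: {}", dir.path().display());
}
```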
diff --git a/zebrad/tests/common/lightwalletd/proto/compact_formats.proto b/zebrad/tests/common/lightwalletd/proto/compact_formats.proto
index f2129f2cbf7..09df06d48be 100644
--- a/zebrad/tests/common/lightwalletd/proto/compact_formats.proto
+++ b/zebrad/tests/common/lightwalletd/proto/compact_formats.proto
@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2020 The Zcash developers
+// Copyright (c) 2019-2021 The Zcash developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or https://www.opensource.org/licenses/mit-license.php .
 
@@ -6,39 +6,50 @@ syntax = "proto3";
 package cash.z.wallet.sdk.rpc;
 option go_package = "lightwalletd/walletrpc";
 option swift_prefix = "";
+
 // Remember that proto3 fields are all optional. A field that is not present will be set to its zero value.
 // bytes fields of hashes are in canonical little-endian format.
 
+// ChainMetadata represents information about the state of the chain as of a given block.
+message ChainMetadata {
+    uint32 saplingCommitmentTreeSize = 1; // the size of the Sapling note commitment tree as of the end of this block
+    uint32 orchardCommitmentTreeSize = 2; // the size of the Orchard note commitment tree as of the end of this block
+}
+
 // CompactBlock is a packaging of ONLY the data from a block that's needed to:
 //   1. Detect a payment to your shielded Sapling address
 //   2. Detect a spend of your shielded Sapling notes
 //   3. Update your witnesses to generate new Sapling spend proofs.
 message CompactBlock {
-    uint32 protoVersion = 1; // the version of this wire format, for storage
-    uint64 height = 2; // the height of this block
-    bytes hash = 3; // the ID (hash) of this block, same as in block explorers
-    bytes prevHash = 4; // the ID (hash) of this block's predecessor
-    uint32 time = 5; // Unix epoch time when the block was mined
-    bytes header = 6; // (hash, prevHash, and time) OR (full header)
-    repeated CompactTx vtx = 7; // zero or more compact transactions from this block
+    uint32 protoVersion = 1;         // the version of this wire format, for storage
+    uint64 height = 2;               // the height of this block
+    bytes hash = 3;                  // the ID (hash) of this block, same as in block explorers
+    bytes prevHash = 4;              // the ID (hash) of this block's predecessor
+    uint32 time = 5;                 // Unix epoch time when the block was mined
+    bytes header = 6;                // (hash, prevHash, and time) OR (full header)
+    repeated CompactTx vtx = 7;      // zero or more compact transactions from this block
+    ChainMetadata chainMetadata = 8; // information about the state of the chain as of this block
 }
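`ChainMetadata` is the new field the gRPC tests below assert on. A rough Rust mirror of the message for orientation; real prost-generated code has different derives and field attributes:

```rust
/// Rough Rust mirror of the `ChainMetadata` proto message above.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct ChainMetadata {
    /// Size of the Sapling note commitment tree as of the end of this block.
    pub sapling_commitment_tree_size: u32,
    /// Size of the Orchard note commitment tree as of the end of this block.
    pub orchard_commitment_tree_size: u32,
}

fn main() {
    // The values the wallet_grpc_test below expects for mainnet block 1687107.
    let meta = ChainMetadata {
        sapling_commitment_tree_size: 1170439,
        orchard_commitment_tree_size: 2,
    };
    assert_ne!(meta, ChainMetadata::default());
}
```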
 // CompactTx contains the minimum information for a wallet to know if this transaction
 // is relevant to it (either pays to it or spends from it) via shielded elements
 // only. This message will not encode a transparent-to-transparent transaction.
 message CompactTx {
+    // Index and hash will allow the receiver to call out to chain
+    // explorers or other data structures to retrieve more information
+    // about this transaction.
     uint64 index = 1; // the index within the full block
     bytes hash = 2;   // the ID (hash) of this transaction, same as in block explorers
 
     // The transaction fee: present if server can provide. In the case of a
     // stateless server and a transaction with transparent inputs, this will be
     // unset because the calculation requires reference to prior transactions.
-    // in a pure-Sapling context, the fee will be calculable as:
-    //    valueBalance + (sum(vPubNew) - sum(vPubOld) - sum(tOut))
+    // If there are no transparent inputs, the fee will be calculable as:
+    //    valueBalanceSapling + valueBalanceOrchard + sum(vPubNew) - sum(vPubOld) - sum(tOut)
     uint32 fee = 3;
 
-    repeated CompactSaplingSpend spends = 4; // inputs
-    repeated CompactSaplingOutput outputs = 5; // outputs
+    repeated CompactSaplingSpend spends = 4;
+    repeated CompactSaplingOutput outputs = 5;
     repeated CompactOrchardAction actions = 6;
 }
@@ -48,11 +59,14 @@ message CompactSaplingSpend {
     bytes nf = 1; // nullifier (see the Zcash protocol specification)
 }
 
-// output is a Sapling Output Description as described in section 7.4 of the
-// Zcash protocol spec. Total size is 948.
+// output encodes the `cmu` field, `ephemeralKey` field, and a 52-byte prefix of the
+// `encCiphertext` field of a Sapling Output Description. These fields are described in
+// section 7.4 of the Zcash protocol spec:
+// https://zips.z.cash/protocol/protocol.pdf#outputencodingandconsensus
+// Total size is 116 bytes.
 message CompactSaplingOutput {
     bytes cmu = 1; // note commitment u-coordinate
-    bytes epk = 2; // ephemeral public key
+    bytes ephemeralKey = 2; // ephemeral public key
     bytes ciphertext = 3; // first 52 bytes of ciphertext
 }
 
@@ -62,5 +76,5 @@ message CompactOrchardAction {
     bytes nullifier = 1; // [32] The nullifier of the input note
     bytes cmx = 2; // [32] The x-coordinate of the note commitment for the output note
     bytes ephemeralKey = 3; // [32] An encoding of an ephemeral Pallas public key
-    bytes ciphertext = 4; // [52] The note plaintext component of the encCiphertext field
+    bytes ciphertext = 4; // [52] The first 52 bytes of the encCiphertext field
 }
diff --git a/zebrad/tests/common/lightwalletd/send_transaction_test.rs b/zebrad/tests/common/lightwalletd/send_transaction_test.rs
index 1db9d1211d4..72b99d57f94 100644
--- a/zebrad/tests/common/lightwalletd/send_transaction_test.rs
+++ b/zebrad/tests/common/lightwalletd/send_transaction_test.rs
@@ -30,7 +30,7 @@ use zebrad::components::mempool::downloads::MAX_INBOUND_CONCURRENCY;
 
 use crate::common::{
     cached_state::get_future_blocks,
-    launch::{can_spawn_zebrad_for_rpc, spawn_zebrad_for_rpc},
+    launch::{can_spawn_zebrad_for_test_type, spawn_zebrad_for_rpc},
     lightwalletd::{
         can_spawn_lightwalletd_for_rpc, spawn_lightwalletd_for_rpc,
         sync::wait_for_zebrad_and_lightwalletd_sync,
@@ -45,7 +45,7 @@
 /// TODO: replace with a const when `min()` stabilises as a const function:
 ///       https://github.com/rust-lang/rust/issues/92391
 fn max_sent_transactions() -> usize {
-    min(CHANNEL_AND_QUEUE_CAPACITY, MAX_INBOUND_CONCURRENCY) - 1
+    min(CHANNEL_AND_QUEUE_CAPACITY, MAX_INBOUND_CONCURRENCY) / 2
}
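Halving the bound, instead of filling the queue to `capacity - 1`, leaves slack between the mempool queue and the download concurrency limit. A self-contained sketch with stand-in constants; the real values come from `zebra_rpc` and `zebrad`:

```rust
use std::cmp::min;

// Stand-in values for illustration only; the real constants live in
// zebra_rpc (queue capacity) and zebrad (mempool download concurrency).
const CHANNEL_AND_QUEUE_CAPACITY: usize = 40;
const MAX_INBOUND_CONCURRENCY: usize = 10;

/// Number of transactions the test sends: half the tighter limit.
fn max_sent_transactions() -> usize {
    min(CHANNEL_AND_QUEUE_CAPACITY, MAX_INBOUND_CONCURRENCY) / 2
}

fn main() {
    // With the stand-in constants: min(40, 10) / 2 == 5.
    assert_eq!(max_sent_transactions(), 5);
}
```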
 /// Number of blocks past the finalized to load transactions from.
@@ -62,7 +62,7 @@ pub async fn run() -> Result<()> {
     let network = Mainnet;
 
     // Skip the test unless the user specifically asked for it
-    if !can_spawn_zebrad_for_rpc(test_name, test_type) {
+    if !can_spawn_zebrad_for_test_type(test_name, test_type, true) {
         return Ok(());
     }
diff --git a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs
index 4b84e4e7753..2001f94f8f1 100644
--- a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs
+++ b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs
@@ -39,7 +39,7 @@ use color_eyre::eyre::Result;
 use zebra_chain::{
     block::Block,
     parameters::Network,
-    parameters::NetworkUpgrade::{self, Canopy},
+    parameters::NetworkUpgrade::{Nu5, Sapling},
     serialization::ZcashDeserializeInto,
 };
 
@@ -145,27 +145,43 @@ pub async fn run() -> Result<()> {
         .await?
         .into_inner();
 
-    // As we are using a pretty much synchronized blockchain, we can assume the tip is above the Canopy network upgrade
-    assert!(block_tip.height > Canopy.activation_height(network).unwrap().0 as u64);
+    // Get `Sapling` activation height.
+    let sapling_activation_height = Sapling.activation_height(network).unwrap().0 as u64;
 
-    // `lightwalletd` only supports post-Sapling blocks, so we begin at the
-    // Sapling activation height.
-    let sapling_activation_height = NetworkUpgrade::Sapling
-        .activation_height(network)
-        .unwrap()
-        .0 as u64;
+    // As we are using a pretty much synchronized blockchain, we can assume the tip is above the Nu5 network upgrade
+    assert!(block_tip.height > Nu5.activation_height(network).unwrap().0 as u64);
 
-    // Call `GetBlock` with block 1 height
-    let block_one = rpc_client
+    // The first block in the mainnet that has sapling and orchard information.
+    let block_with_trees = 1687107;
+
+    // Call `GetBlock` with `block_with_trees`.
+    let get_block_response = rpc_client
         .get_block(BlockId {
-            height: sapling_activation_height,
+            height: block_with_trees,
             hash: vec![],
         })
         .await?
         .into_inner();
 
-    // Make sure we got block 1 back
-    assert_eq!(block_one.height, sapling_activation_height);
+    // Make sure we got block `block_with_trees` back
+    assert_eq!(get_block_response.height, block_with_trees);
+
+    // Testing the `trees` field of `GetBlock`.
+    assert_eq!(
+        get_block_response
+            .chain_metadata
+            .clone()
+            .unwrap()
+            .sapling_commitment_tree_size,
+        1170439
+    );
+    assert_eq!(
+        get_block_response
+            .chain_metadata
+            .unwrap()
+            .orchard_commitment_tree_size,
+        2
+    );
 
     // Call `GetBlockRange` with the range starting at block 1 up to block 10
     let mut block_range = rpc_client
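The test now anchors on NU5 instead of Canopy because the Orchard commitment tree only exists after NU5. A sketch of the height relationships, with hard-coded mainnet activation heights (Sapling 419,200 and NU5 1,687,104) standing in for the `zebra_chain` lookup:

```rust
fn main() {
    // Hard-coded mainnet activation heights, replacing the
    // `NetworkUpgrade::activation_height(network)` lookups in the test.
    let sapling_activation_height: u64 = 419_200;
    let nu5_activation_height: u64 = 1_687_104;

    // `block_with_trees` in the test is just past NU5 activation, which is
    // consistent with its tiny Orchard tree size of 2.
    let block_with_trees: u64 = 1_687_107;
    assert!(block_with_trees > nu5_activation_height);

    // `lightwalletd` only serves post-Sapling blocks, so ranges start here.
    assert!(sapling_activation_height < nu5_activation_height);
}
```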
diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs
index 2da33067f2c..dd0a1390294 100644
--- a/zebrad/tests/common/sync.rs
+++ b/zebrad/tests/common/sync.rs
@@ -5,10 +5,7 @@
 //! Test functions in this file will not be run.
 //! This file is only for test library code.
 
-use std::{
-    path::{Path, PathBuf},
-    time::Duration,
-};
+use std::{path::PathBuf, time::Duration};
 
 use color_eyre::eyre::Result;
 use tempfile::TempDir;
@@ -19,7 +16,6 @@ use zebrad::{components::sync, config::ZebradConfig};
 use zebra_test::{args, prelude::*};
 
 use super::{
-    cached_state::copy_state_directory,
     config::{persistent_test_config, testdir},
     launch::ZebradTestDirExt,
 };
@@ -52,7 +48,7 @@ pub const SYNC_PROGRESS_REGEX: &str = r"sync_percent";
 
 /// The text that should be logged when Zebra loads its compiled-in checkpoints.
 #[cfg(feature = "zebra-checkpoints")]
 pub const CHECKPOINT_VERIFIER_REGEX: &str =
-    r"initializing chain verifier.*max_checkpoint_height.*=.*Height";
+    r"initializing block verifier router.*max_checkpoint_height.*=.*Height";
 
 /// The maximum amount of time Zebra should take to reload after shutting down.
 ///
@@ -83,7 +79,7 @@
 pub const FINISH_PARTIAL_SYNC_TIMEOUT: Duration = Duration::from_secs(11 * 60 * 60);
 
 /// The maximum time to wait for Zebrad to synchronize up to the chain tip starting from the
 /// genesis block.
-pub const FINISH_FULL_SYNC_TIMEOUT: Duration = Duration::from_secs(58 * 60 * 60);
+pub const FINISH_FULL_SYNC_TIMEOUT: Duration = Duration::from_secs(72 * 60 * 60);
 
 /// The test sync height where we switch to using the default lookahead limit.
 ///
@@ -341,30 +337,6 @@ pub fn check_sync_logs_until(
     Ok(zebrad)
 }
 
-/// Runs a zebrad instance to synchronize the chain to the network tip.
-///
-/// The zebrad instance is executed on a copy of the partially synchronized chain state. This copy
-/// is returned afterwards, containing the fully synchronized chain state.
-#[allow(dead_code)]
-#[tracing::instrument]
-pub async fn copy_state_and_perform_full_sync(
-    network: Network,
-    partial_sync_path: &Path,
-) -> Result<TempDir> {
-    let fully_synced_path = copy_state_directory(network, &partial_sync_path).await?;
-
-    sync_until(
-        Height::MAX,
-        network,
-        SYNC_FINISHED_REGEX,
-        FINISH_PARTIAL_SYNC_TIMEOUT,
-        fully_synced_path,
-        MempoolBehavior::ShouldAutomaticallyActivate,
-        true,
-        false,
-    )
-}
-
 /// Returns a test config for caching Zebra's state up to the mandatory checkpoint.
 pub fn cached_mandatory_checkpoint_test_config() -> Result<ZebradConfig> {
     let mut config = persistent_test_config()?;
diff --git a/zebrad/tests/common/test_type.rs b/zebrad/tests/common/test_type.rs
index a94420160b9..adb5fd4b897 100644
--- a/zebrad/tests/common/test_type.rs
+++ b/zebrad/tests/common/test_type.rs
@@ -1,7 +1,14 @@
 //! Provides TestType enum with shared code for acceptance tests
 
-use std::{env, path::PathBuf, time::Duration};
+use std::{
+    env,
+    path::{Path, PathBuf},
+    time::Duration,
+};
+
+use indexmap::IndexSet;
+
+use zebra_network::CacheDir;
 use zebra_test::{command::NO_MATCHES_REGEX_ITER, prelude::*};
 use zebrad::config::ZebradConfig;
@@ -41,6 +48,9 @@ pub enum TestType {
         allow_lightwalletd_cached_state: bool,
     },
 
+    /// Launch with a Zebra and lightwalletd state that might or might not be empty.
+    UseAnyState,
+
     /// Sync to tip from a lightwalletd cached state.
     ///
     /// This test requires a cached Zebra and lightwalletd state.
@@ -69,7 +79,7 @@
     //   - FullSyncFromGenesis, UpdateCachedState, UpdateZebraCachedStateNoRpc:
     //     skip the test if it is not available
     match self {
-        LaunchWithEmptyState { .. } => false,
+        LaunchWithEmptyState { .. } | UseAnyState => false,
         FullSyncFromGenesis { .. }
         | UpdateCachedState
         | UpdateZebraCachedStateNoRpc
@@ -81,16 +91,17 @@
     pub fn needs_zebra_rpc_server(&self) -> bool {
         match self {
            UpdateZebraCachedStateWithRpc | LaunchWithEmptyState { .. } => true,
-            UpdateZebraCachedStateNoRpc | FullSyncFromGenesis { .. } | UpdateCachedState => {
-                self.launches_lightwalletd()
-            }
+            UseAnyState
+            | UpdateZebraCachedStateNoRpc
+            | FullSyncFromGenesis { .. }
+            | UpdateCachedState => self.launches_lightwalletd(),
         }
     }
 
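The regex change tracks Zebra's rename of the `chain verifier` to the `block verifier router`. A quick check that the updated pattern still matches, using the `regex` crate and a representative (not verbatim) log line:

```rust
use regex::Regex;

fn main() {
    let re = Regex::new(
        r"initializing block verifier router.*max_checkpoint_height.*=.*Height",
    )
    .expect("regex compiles");

    // A representative log line; real zebrad output carries more fields.
    let line =
        "initializing block verifier router tip=None max_checkpoint_height=Height(2411666)";
    assert!(re.is_match(line));
}
```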
     /// Does this test launch `lightwalletd`?
     pub fn launches_lightwalletd(&self) -> bool {
         match self {
-            UpdateZebraCachedStateNoRpc | UpdateZebraCachedStateWithRpc => false,
+            UseAnyState | UpdateZebraCachedStateNoRpc | UpdateZebraCachedStateWithRpc => false,
             FullSyncFromGenesis { .. } | UpdateCachedState => true,
             LaunchWithEmptyState {
                 launches_lightwalletd,
@@ -106,6 +117,7 @@
     //   - UpdateCachedState: skip the test if it is not available
     match self {
         LaunchWithEmptyState { .. }
+        | UseAnyState
         | FullSyncFromGenesis { .. }
         | UpdateZebraCachedStateNoRpc
         | UpdateZebraCachedStateWithRpc => false,
@@ -120,14 +132,17 @@
         FullSyncFromGenesis {
             allow_lightwalletd_cached_state,
         } => *allow_lightwalletd_cached_state,
-        UpdateCachedState | UpdateZebraCachedStateNoRpc | UpdateZebraCachedStateWithRpc => true,
+        UseAnyState
+        | UpdateCachedState
+        | UpdateZebraCachedStateNoRpc
+        | UpdateZebraCachedStateWithRpc => true,
     }
 }
 
     /// Can this test create a new `LIGHTWALLETD_DATA_DIR` cached state?
     pub fn can_create_lightwalletd_cached_state(&self) -> bool {
         match self {
-            LaunchWithEmptyState { .. } => false,
+            LaunchWithEmptyState { .. } | UseAnyState => false,
             FullSyncFromGenesis { .. } | UpdateCachedState => true,
             UpdateZebraCachedStateNoRpc | UpdateZebraCachedStateWithRpc => false,
         }
     }
@@ -152,9 +167,16 @@
     /// Returns a Zebra config for this test.
     ///
+    /// `replace_cache_dir` replaces any cached or ephemeral state.
+    ///
     /// Returns `None` if the test should be skipped,
     /// and `Some(Err(_))` if the config could not be created.
-    pub fn zebrad_config<S: AsRef<str>>(&self, test_name: S) -> Option<Result<ZebradConfig>> {
+    pub fn zebrad_config<Str: AsRef<str>>(
+        &self,
+        test_name: Str,
+        use_internet_connection: bool,
+        replace_cache_dir: Option<&Path>,
+    ) -> Option<Result<ZebradConfig>> {
         let config = if self.needs_zebra_rpc_server() {
             // This is what we recommend our users configure.
             random_known_rpc_port_config(true)
@@ -177,22 +199,35 @@
             config.rpc.parallel_cpu_threads = 0;
         }
 
-        if !self.needs_zebra_cached_state() {
-            return Some(Ok(config));
+        if !use_internet_connection {
+            config.network.initial_mainnet_peers = IndexSet::new();
+            config.network.initial_testnet_peers = IndexSet::new();
+            // Avoid re-using cached peers from disk when we're supposed to be a disconnected instance
+            config.network.cache_dir = CacheDir::disabled();
+
+            // Activate the mempool immediately by default
+            config.mempool.debug_enable_at_height = Some(0);
         }
 
+        // Add a fake miner address for mining RPCs
         #[cfg(feature = "getblocktemplate-rpcs")]
         let _ = config.mining.miner_address.insert(
             zebra_chain::transparent::Address::from_script_hash(config.network.network, [0x7e; 20]),
         );
 
-        let zebra_state_path = self.zebrad_state_path(test_name)?;
+        // If we have a cached state, or we don't want to be ephemeral, update the config to use it
+        if replace_cache_dir.is_some() || self.needs_zebra_cached_state() {
+            let zebra_state_path = replace_cache_dir
+                .map(|path| path.to_owned())
+                .or_else(|| self.zebrad_state_path(test_name))?;
 
-        config.sync.checkpoint_verify_concurrency_limit =
-            zebrad::components::sync::DEFAULT_CHECKPOINT_CONCURRENCY_LIMIT;
+            config.state.ephemeral = false;
+            config.state.cache_dir = zebra_state_path;
 
-        config.state.ephemeral = false;
-        config.state.cache_dir = zebra_state_path;
+            // And reset the concurrency to the default value
+            config.sync.checkpoint_verify_concurrency_limit =
+                zebrad::components::sync::DEFAULT_CHECKPOINT_CONCURRENCY_LIMIT;
+        }
 
         Some(Ok(config))
     }
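Moving the disconnected-instance tweaks into `zebrad_config` keeps every spawn path consistent: no seed peers, no peer cache, and an immediately active mempool. A simplified stand-in for those three changes; the structs are local stand-ins, not the real `ZebradConfig` sections:

```rust
use std::collections::HashSet;

// Local stand-ins for the ZebradConfig sections touched above; the field
// names mirror the real config, the types do not.
#[derive(Default)]
struct NetworkSection {
    initial_mainnet_peers: HashSet<String>,
    initial_testnet_peers: HashSet<String>,
    peer_cache_enabled: bool,
}

#[derive(Default)]
struct MempoolSection {
    debug_enable_at_height: Option<u32>,
}

/// Apply the "disconnected instance" tweaks sketched above.
fn make_disconnected(network: &mut NetworkSection, mempool: &mut MempoolSection) {
    // No seed peers, so the instance never dials out...
    network.initial_mainnet_peers.clear();
    network.initial_testnet_peers.clear();
    // ...and no peer cache, so it can't reuse previously discovered peers.
    network.peer_cache_enabled = false;
    // With no peers there is no sync progress, so activate the mempool
    // immediately instead of waiting to reach the chain tip.
    mempool.debug_enable_at_height = Some(0);
}

fn main() {
    let (mut network, mut mempool) = (NetworkSection::default(), MempoolSection::default());
    make_disconnected(&mut network, &mut mempool);
    assert_eq!(mempool.debug_enable_at_height, Some(0));
}
```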
@@ -237,7 +272,7 @@
     /// Returns the `zebrad` timeout for this test type.
     pub fn zebrad_timeout(&self) -> Duration {
         match self {
-            LaunchWithEmptyState { .. } => LIGHTWALLETD_DELAY,
+            LaunchWithEmptyState { .. } | UseAnyState => LIGHTWALLETD_DELAY,
             FullSyncFromGenesis { .. } => LIGHTWALLETD_FULL_SYNC_TIP_DELAY,
             UpdateCachedState | UpdateZebraCachedStateNoRpc => LIGHTWALLETD_UPDATE_TIP_DELAY,
             UpdateZebraCachedStateWithRpc => FINISH_PARTIAL_SYNC_TIMEOUT,
@@ -254,7 +289,7 @@
         // We use the same timeouts for zebrad and lightwalletd,
         // because the tests check zebrad and lightwalletd concurrently.
         match self {
-            LaunchWithEmptyState { .. } => LIGHTWALLETD_DELAY,
+            LaunchWithEmptyState { .. } | UseAnyState => LIGHTWALLETD_DELAY,
             FullSyncFromGenesis { .. } => LIGHTWALLETD_FULL_SYNC_TIP_DELAY,
             UpdateCachedState | UpdateZebraCachedStateNoRpc | UpdateZebraCachedStateWithRpc => {
                 LIGHTWALLETD_UPDATE_TIP_DELAY