diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 00000000..5bf6880b --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,6 @@ +[target.'cfg(all())'] +rustflags = ["--cfg", "tokio_unstable"] + +[profile.profiling] +inherits = "release" +debug = true diff --git a/.editorconfig b/.editorconfig index 90b66a48..57d60119 100644 --- a/.editorconfig +++ b/.editorconfig @@ -22,3 +22,6 @@ indent_size = 2 [*.proto] indent_size = 2 + +[*.fbs] +indent_size = 2 diff --git a/.gitignore b/.gitignore index 7efb3ad9..0a0ce5cf 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,5 @@ /.devcontainer/.profile .pre-commit-config.yaml .envrc +perf.data +perf.data.old diff --git a/Cargo.lock b/Cargo.lock index 94751006..6c52e3b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,87 +2,41 @@ # It is not intended for manual editing. version = 3 -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - [[package]] name = "addr2line" -version = "0.21.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array 0.14.7", -] - -[[package]] -name = "aead-gcm-stream" -version = "0.1.0" +name = "adler2" +version = "2.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a09ecb526d53de2842cc876ee5c9b51161ee60399edeca4cf74892a01b48177" -dependencies = [ - "aead", - "aes", - "cipher", - "ctr", - "ghash", - "subtle", -] +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", ] [[package]] -name = "aes-gcm" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" -dependencies = [ - "aead", - "aes", - "cipher", - "ctr", - "ghash", - "subtle", -] - -[[package]] -name = "aes-kw" -version = "0.2.1" +name = "ahash" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fa2b352dcefb5f7f3a5fb840e02665d311d878955380515e4fd50095dd3d8c" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "aes", + "getrandom", + "once_cell", + "version_check", ] [[package]] @@ -91,9 +45,8 @@ version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "cfg-if 1.0.0", - "const-random", - "getrandom 0.2.12", + "cfg-if", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -101,517 +54,717 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" 
dependencies = [ "memchr", ] [[package]] -name = "alloc-no-stdlib" -version = "2.0.4" +name = "allocator-api2" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] -name = "alloc-stdlib" -version = "0.2.2" +name = "alloy-chains" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +checksum = "94c225801d42099570d0674701dddd4142f0ef715282aeb5985042e2ec962df7" dependencies = [ - "alloc-no-stdlib", + "num_enum", + "strum", ] [[package]] -name = "allocator-api2" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" - -[[package]] -name = "android-tzdata" -version = "0.1.1" +name = "alloy-consensus" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" +checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "serde", +] [[package]] -name = "android_system_properties" -version = "0.1.5" +name = "alloy-eip2930" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" dependencies = [ - "libc", + "alloy-primitives", + "alloy-rlp", + "serde", ] [[package]] -name = "anstream" -version = "0.6.13" +name = "alloy-eip7702" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "utf8parse", + "alloy-primitives", + "alloy-rlp", + "k256", + "serde", ] [[package]] -name = "anstyle" -version = "1.0.6" +name = "alloy-eips" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" +dependencies = [ + "alloy-eip2930", + "alloy-eip7702", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "derive_more", + "once_cell", + "serde", + "sha2", +] [[package]] -name = "anstyle-parse" -version = "0.2.3" +name = "alloy-json-rpc" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "d3c717b5298fad078cd3a418335b266eba91b511383ca9bd497f742d5975d5ab" dependencies = [ - "utf8parse", + "alloy-primitives", + "alloy-sol-types", + "serde", + "serde_json", + "thiserror", + "tracing", ] [[package]] -name = "anstyle-query" -version = "1.0.2" +name = "alloy-network" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" -dependencies = [ - "windows-sys 0.52.0", +checksum = "fb3705ce7d8602132bcf5ac7a1dd293a42adc2f183abf5907c30ac535ceca049" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-signer", + "alloy-sol-types", + "async-trait", + "auto_impl", + "futures-utils-wasm", + "thiserror", ] [[package]] -name = "anstyle-wincon" -version = "3.0.2" +name = 
"alloy-network-primitives" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "94ad40869867ed2d9cd3842b1e800889e5b49e6b92da346e93862b4a741bedf3" dependencies = [ - "anstyle", - "windows-sys 0.52.0", + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "serde", ] [[package]] -name = "anyhow" -version = "1.0.81" +name = "alloy-primitives" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" - -[[package]] -name = "apibara-cli" -version = "0.5.1" +checksum = "a289ffd7448036f2f436b377f981c79ce0b2090877bad938d43387dc09931877" dependencies = [ - "apibara-core", - "apibara-observability", - "apibara-script", - "apibara-sdk", - "apibara-sink-common", - "async-compression", - "clap", - "colored", - "dirs", - "error-stack", - "float-cmp", - "futures 0.3.30", - "octocrab", - "reqwest", + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more", + "foldhash", + "hashbrown 0.15.0", + "hex-literal", + "indexmap 2.6.0", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand 0.8.5", + "ruint", + "rustc-hash", "serde", - "serde_json", - "similar-asserts", - "tabled", - "tar", - "tempfile", - "tokio 1.36.0", - "tokio-stream", - "tokio-util", - "tracing", - "walkdir", + "sha3", + "tiny-keccak", ] [[package]] -name = "apibara-core" -version = "0.1.0" -dependencies = [ - "hex", - "pbjson", - "pbjson-build", - "pbjson-types", - "prost", - "quickcheck", - "quickcheck_macros", +name = "alloy-provider" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "927f708dd457ed63420400ee5f06945df9632d5d101851952056840426a10dc5" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + 
"alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-transport", + "alloy-transport-http", + "async-stream", + "async-trait", + "auto_impl", + "dashmap", + "futures", + "futures-utils-wasm", + "lru", + "pin-project", + "reqwest 0.12.8", "serde", "serde_json", - "starknet", "thiserror", - "tokio 1.36.0", - "tonic 0.9.2", - "tonic-build 0.9.2", + "tokio", "tracing", + "url", ] [[package]] -name = "apibara-node" -version = "0.1.0" +name = "alloy-rlp" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" dependencies = [ - "apibara-core", - "apibara-observability", + "alloy-rlp-derive", "arrayvec", - "assert_matches", - "async-stream", - "async-trait", - "byte-unit", - "byteorder", - "dirs", - "futures 0.3.30", - "governor", - "hyper 0.14.28", - "lazy_static", - "libmdbx", - "opentelemetry", - "opentelemetry-otlp", - "pin-project", - "prost", - "tempfile", - "thiserror", - "tokio 1.36.0", - "tokio-stream", - "tokio-util", - "tonic 0.9.2", - "tower", - "tracing", - "tracing-opentelemetry", - "tracing-subscriber", - "tracing-tree", + "bytes", ] [[package]] -name = "apibara-observability" -version = "0.1.0" +name = "alloy-rlp-derive" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" dependencies = [ - "error-stack", - "opentelemetry", - "opentelemetry-otlp", - "tracing", - "tracing-opentelemetry", - "tracing-subscriber", - "tracing-tree", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] -name = "apibara-operator" -version = "0.2.2" +name = "alloy-rpc-client" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d82952dca71173813d4e5733e2c986d8b04aea9e0f3b0a576664c232ad050a5" dependencies = [ - "apibara-observability", - "clap", - "ctrlc", - "error-stack", - "futures 0.3.30", - "k8s-openapi", - 
"kube", - "schemars", + "alloy-json-rpc", + "alloy-transport", + "alloy-transport-http", + "futures", + "pin-project", + "reqwest 0.12.8", "serde", "serde_json", - "serde_yaml", - "tokio 1.36.0", + "tokio", "tokio-stream", - "tokio-util", + "tower 0.5.1", "tracing", + "url", ] [[package]] -name = "apibara-runner-common" -version = "0.1.0" -dependencies = [ - "error-stack", - "prost", - "tonic 0.9.2", - "tonic-build 0.9.2", -] - -[[package]] -name = "apibara-runner-local" -version = "0.1.0" +name = "alloy-rpc-types" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64333d639f2a0cf73491813c629a405744e16343a4bc5640931be707c345ecc5" dependencies = [ - "apibara-observability", - "apibara-runner-common", - "apibara-sink-common", - "clap", - "ctrlc", - "error-stack", - "portpicker", - "tempfile", - "tokio 1.36.0", - "tokio-stream", - "tokio-util", - "tonic 0.9.2", - "tonic-health", - "tonic-reflection", - "tracing", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", ] [[package]] -name = "apibara-script" -version = "0.1.0" +name = "alloy-rpc-types-beacon" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e7081d2206dca51ce23a06338d78d9b536931cc3f15134fc1c6535eb2b77f18" dependencies = [ - "assert_matches", - "data-url 0.2.0", - "deno_ast", - "deno_core", - "deno_runtime", - "error-stack", - "reqwest", + "alloy-eips", + "alloy-primitives", + "alloy-rpc-types-engine", "serde", - "serde_json", - "serde_v8 0.134.0", - "tempfile", - "tokio 1.36.0", - "tracing", + "serde_with", + "thiserror", ] [[package]] -name = "apibara-sdk" -version = "0.1.0" +name = "alloy-rpc-types-engine" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1464c4dd646e1bdfde86ae65ce5ba168dbb29180b478011fe87117ae46b1629b" dependencies = [ - "apibara-core", - "async-trait", - "error-stack", - "futures 0.3.30", - "futures-util", - "hex", - 
"http 0.2.12", - "hyper 0.14.28", - "pin-project", - "prost", + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "derive_more", + "jsonwebtoken", + "rand 0.8.5", "serde", - "tokio 1.36.0", - "tokio-stream", - "tokio-util", - "tonic 0.9.2", - "tracing", ] [[package]] -name = "apibara-sink-common" -version = "0.1.0" +name = "alloy-rpc-types-eth" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83aa984386deda02482660aa31cb8ca1e63d533f1c31a52d7d181ac5ec68e9b8" dependencies = [ - "anstyle", - "apibara-core", - "apibara-observability", - "apibara-script", - "apibara-sdk", - "apibara-sink-options-derive", - "assert_matches", - "async-trait", - "bytesize", - "clap", - "ctrlc", - "dotenvy", - "error-stack", - "etcd-client", - "exponential-backoff", - "futures 0.3.30", - "lazy_static", - "prost", - "redis", - "regex", + "alloy-consensus", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "cfg-if", + "derive_more", + "hashbrown 0.14.5", + "itertools 0.13.0", "serde", "serde_json", - "tempdir", - "testcontainers", - "tokio 1.36.0", - "tokio-stream", - "tokio-util", - "tonic 0.9.2", - "tonic-build 0.9.2", - "tonic-health", - "tonic-reflection", - "tracing", - "warp", ] [[package]] -name = "apibara-sink-console" -version = "0.6.1" +name = "alloy-serde" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "731f75ec5d383107fd745d781619bd9cedf145836c51ecb991623d41278e71fa" dependencies = [ - "apibara-core", - "apibara-observability", - "apibara-sink-common", - "async-trait", - "clap", - "error-stack", - "jemallocator", - "prost", + "alloy-primitives", "serde", "serde_json", - "tokio 1.36.0", - "tokio-util", - "tracing", ] [[package]] -name = "apibara-sink-mongo" -version = "0.9.2" -dependencies = [ - "apibara-core", - "apibara-observability", - "apibara-sink-common", +name = 
"alloy-signer" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "307324cca94354cd654d6713629f0383ec037e1ff9e3e3d547212471209860c0" +dependencies = [ + "alloy-primitives", "async-trait", - "clap", - "error-stack", + "auto_impl", + "elliptic-curve 0.13.8", + "k256", + "thiserror", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0409e3ba5d1de409997a7db8b8e9d679d52088c1dee042a85033affd3cadeab4" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.79", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a18372ef450d59f74c7a64a738f546ba82c92f816597fed1802ef559304c81f1" +dependencies = [ + "alloy-sol-macro-input", + "const-hex", + "heck", + "indexmap 2.6.0", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.79", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7bad89dd0d5f109e8feeaf787a9ed7a05a91a9a0efc6687d147a70ebca8eff7" +dependencies = [ + "const-hex", + "dunce", + "heck", + "proc-macro2", + "quote", + "syn 2.0.79", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4aa666f1036341b46625e72bd36878bf45ad0185f1b88601223e1ec6ed4b72b1" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro", + "const-hex", +] + +[[package]] +name = "alloy-transport" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33616b2edf7454302a1d48084db185e52c309f73f6c10be99b0fe39354b3f1e9" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.1", "futures-util", - "jemallocator", 
- "mongodb", + "futures-utils-wasm", "serde", "serde_json", - "testcontainers", - "tokio 1.36.0", - "tokio-util", + "thiserror", + "tokio", + "tower 0.5.1", "tracing", + "url", ] [[package]] -name = "apibara-sink-options-derive" -version = "0.1.0" +name = "alloy-transport-http" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a944f5310c690b62bbb3e7e5ce34527cbd36b2d18532a797af123271ce595a49" dependencies = [ - "proc-macro-error", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", + "alloy-json-rpc", + "alloy-transport", + "reqwest 0.12.8", + "serde_json", + "tower 0.5.1", + "tracing", + "url", ] [[package]] -name = "apibara-sink-options-derive-tests" -version = "0.1.0" +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "0.6.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" + +[[package]] +name = "anstyle-parse" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" 
+version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", +] + +[[package]] +name = "anyhow" +version = "1.0.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" + +[[package]] +name = "apibara-benchmark" +version = "0.0.0" +dependencies = [ + "apibara-dna-common", + "apibara-dna-protocol", + "apibara-observability", + "byte-unit", + "clap", + "ctrlc", + "error-stack", + "futures", + "hex", + "prost", + "tokio", + "tokio-stream", + "tokio-util", + "tonic", + "tracing", +] + +[[package]] +name = "apibara-dna-beaconchain" +version = "0.0.0" dependencies = [ - "apibara-sink-common", - "apibara-sink-options-derive", + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rpc-types-beacon", + "apibara-dna-common", + "apibara-dna-protocol", + "apibara-observability", + "byte-unit", + "clap", + "ctrlc", + "error-stack", + "futures", + "hex", + "mimalloc", + "prost", + "prost-types", + "reqwest 0.12.8", + "rkyv 0.8.8", + "roaring", "serde", "serde_json", + "serde_with", + "tokio", + "tokio-util", + "tonic", + "tracing", + "url", ] [[package]] -name = "apibara-sink-parquet" -version = "0.7.1" +name = "apibara-dna-common" +version = "0.0.0" dependencies = [ - "apibara-core", + "anyhow", + "apibara-dna-protocol", + "apibara-etcd", "apibara-observability", - "apibara-sink-common", - "arrow", - "async-trait", "aws-config", "aws-sdk-s3", + "byte-unit", + "bytes", "clap", + "crc32fast", + "dirs", "error-stack", - "jemallocator", - "parquet", - "serde", - "serde_json", + 
"etcd-client", + "foyer", + "futures", + "hex", + "memmap2", + "prost", + "rand 0.8.5", + "rkyv 0.8.8", + "roaring", "tempdir", - "tokio 1.36.0", + "tempfile", + "testcontainers", + "tokio", + "tokio-stream", "tokio-util", + "tonic", + "tonic-health", + "tonic-reflection", "tracing", + "zstd", ] [[package]] -name = "apibara-sink-postgres" -version = "0.8.1" +name = "apibara-dna-evm" +version = "0.0.0" dependencies = [ - "apibara-core", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-transport", + "apibara-dna-common", + "apibara-dna-protocol", "apibara-observability", - "apibara-sink-common", - "async-trait", + "byte-unit", "clap", + "ctrlc", "error-stack", - "jemallocator", - "native-tls", - "postgres-native-tls", + "futures", + "hex", + "mimalloc", + "prost", + "prost-types", + "reqwest 0.12.8", + "rkyv 0.8.8", + "roaring", "serde", "serde_json", - "testcontainers", - "tokio 1.36.0", - "tokio-postgres", + "serde_with", + "tokio", "tokio-util", + "tonic", "tracing", + "url", ] [[package]] -name = "apibara-sink-webhook" -version = "0.7.1" +name = "apibara-dna-protocol" +version = "0.1.0" dependencies = [ - "apibara-core", - "apibara-observability", - "apibara-sink-common", - "async-trait", - "clap", "error-stack", - "http 0.2.12", - "jemallocator", + "hex", + "pin-project", "prost", - "reqwest", + "prost-types", "serde", "serde_json", - "tokio 1.36.0", + "tokio-stream", + "tonic", + "tonic-build", +] + +[[package]] +name = "apibara-etcd" +version = "0.1.0" +dependencies = [ + "error-stack", + "etcd-client", + "futures", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "apibara-etcd-dbg" +version = "0.1.0" +dependencies = [ + "apibara-etcd", + "apibara-observability", + "clap", + "error-stack", + "tokio", + "tokio-stream", "tokio-util", "tracing", - "wiremock", +] + +[[package]] +name = "apibara-observability" +version = "0.1.0" +dependencies = [ + "error-stack", + "nu-ansi-term 0.50.1", + 
"opentelemetry", + "opentelemetry-otlp", + "opentelemetry_sdk", + "time", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", ] [[package]] name = "apibara-starknet" version = "1.6.2" dependencies = [ - "apibara-core", - "apibara-node", - "apibara-sdk", - "assert_matches", - "bloomfilter", + "apibara-dna-common", + "apibara-dna-protocol", + "apibara-observability", "byte-unit", - "byteorder", "clap", "ctrlc", "error-stack", - "futures 0.3.30", - "futures-channel", - "futures-util", + "futures", "hex", - "hyper 0.14.28", - "jemallocator", - "lazy_static", - "mockall", - "pbjson-types", - "pin-project", + "mimalloc", "prost", - "quickcheck", - "quickcheck_macros", - "reqwest", + "prost-types", + "reqwest 0.12.8", + "rkyv 0.8.8", + "roaring", "serde", "serde_json", + "serde_with", "starknet", - "tempdir", - "tempfile", - "testcontainers", - "thiserror", - "tokio 1.36.0", - "tokio-stream", - "tokio-tungstenite 0.19.0", + "tokio", "tokio-util", - "tonic 0.9.2", - "tonic-build 0.9.2", - "tonic-health", - "tonic-reflection", - "tower", + "tonic", "tracing", - "tracing-futures", "url", - "warp", ] [[package]] -name = "arc-swap" -version = "1.7.0" +name = "ark-ff" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b3d0060af21e8d11a926981cc00c6c1541aa91dd64b9f881985c3da1094425f" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] [[package]] name = "ark-ff" @@ -619,355 +772,138 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" dependencies = [ - "ark-ff-asm", - "ark-ff-macros", - "ark-serialize", - "ark-std", + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 
0.4.2", + "ark-std 0.4.0", "derivative", "digest 0.10.7", - "itertools", + "itertools 0.10.5", "num-bigint", "num-traits", "paste", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "zeroize", ] +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "ark-ff-asm" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ - "quote 1.0.35", + "quote", "syn 1.0.109", ] [[package]] name = "ark-ff-macros" -version = "0.4.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.79", - "quote 1.0.35", + "quote", "syn 1.0.109", ] [[package]] -name = "ark-serialize" +name = "ark-ff-macros" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ - "ark-std", - "digest 0.10.7", "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "ark-std" -version = "0.4.0" +name = "ark-serialize" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" dependencies = [ - "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "arrayvec" -version = "0.7.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" - -[[package]] -name = "arrow" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a46441ae78c0c5915f62aa32cad9910647c19241456dd24039646dd96d494a5" -dependencies = [ - "ahash", - "arrow-arith", - "arrow-array", - "arrow-buffer", - "arrow-cast", - "arrow-data", - "arrow-json", - "arrow-ord", - "arrow-row", - "arrow-schema", - "arrow-select", - "arrow-string", -] - -[[package]] -name = "arrow-arith" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350c5067470aeeb38dcfcc1f7e9c397098116409c9087e43ca99c231020635d9" -dependencies = [ - "arrow-array", - "arrow-buffer", - "arrow-data", - "arrow-schema", - "chrono", - "half", - "num", -] - -[[package]] -name = "arrow-array" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6049e031521c4e7789b7530ea5991112c0a375430094191f3b74bdf37517c9a9" -dependencies = [ - "ahash", - "arrow-buffer", - "arrow-data", - "arrow-schema", - "chrono", - "half", - "hashbrown 0.13.2", - "num", -] - -[[package]] -name = "arrow-buffer" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a83450b94b9fe018b65ba268415aaab78757636f68b7f37b6bc1f2a3888af0a0" -dependencies = [ - "half", - "num", -] - -[[package]] -name = "arrow-cast" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "249198411254530414805f77e88e1587b0914735ea180f906506905721f7a44a" -dependencies = [ - "arrow-array", - "arrow-buffer", - "arrow-data", - "arrow-schema", - "arrow-select", - "chrono", - "lexical-core", - "num", -] - -[[package]] -name = "arrow-data" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4d48dcbed83d741d4af712af17f6d952972b8f6491b24ee2415243a7e37c6438" -dependencies = [ - "arrow-buffer", - "arrow-schema", - "half", - "num", -] - -[[package]] -name = "arrow-ipc" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea8d7b138c5414aeef5dd08abacf362f87ed9b1168ea38d60a6f67590c3f7d99" -dependencies = [ - "arrow-array", - "arrow-buffer", - "arrow-cast", - "arrow-data", - "arrow-schema", - "flatbuffers", -] - -[[package]] -name = "arrow-json" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3a597fdca885a81f2e7ab0bacaa0bd2dfefb4cd6a2e5a3d1677396a68673101" -dependencies = [ - "arrow-array", - "arrow-buffer", - "arrow-cast", - "arrow-data", - "arrow-schema", - "chrono", - "half", - "indexmap 1.9.3", - "lexical-core", - "num", - "serde", - "serde_json", -] - -[[package]] -name = "arrow-ord" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29be2d5fadaab29e4fa6a7e527ceaa1c2cddc57dc6d86c062f7a05adcd8df71e" -dependencies = [ - "arrow-array", - "arrow-buffer", - "arrow-data", - "arrow-schema", - "arrow-select", - "half", - "num", -] - -[[package]] -name = "arrow-row" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e0bd6ad24d56679b3317b499b0de61bca16d3142896908cce1aa943e56e981" -dependencies = [ - "ahash", - "arrow-array", - "arrow-buffer", - "arrow-data", - "arrow-schema", - "half", - "hashbrown 0.13.2", -] - -[[package]] -name = "arrow-schema" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b71d8d68d0bc2e648e4e395896dc518be8b90c5f0f763c59083187c3d46184b" - -[[package]] -name = "arrow-select" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "470cb8610bdfda56554a436febd4e457e506f3c42e01e545a1ea7ecf2a4c8823" -dependencies = [ - "arrow-array", - "arrow-buffer", - 
"arrow-data", - "arrow-schema", - "num", + "ark-std 0.3.0", + "digest 0.9.0", ] [[package]] -name = "arrow-string" -version = "41.0.0" +name = "ark-serialize" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f8a2e4ff9dbbd51adbabf92098b71e3eb2ef0cfcb75236ca7c3ce087cce038" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" dependencies = [ - "arrow-array", - "arrow-buffer", - "arrow-data", - "arrow-schema", - "arrow-select", - "regex", - "regex-syntax 0.7.5", + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", ] [[package]] -name = "asn1-rs" -version = "0.5.2" +name = "ark-std" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" dependencies = [ - "asn1-rs-derive", - "asn1-rs-impl", - "displaydoc", - "nom 7.1.3", "num-traits", - "rusticata-macros", - "thiserror", - "time", + "rand 0.8.5", ] [[package]] -name = "asn1-rs-derive" +name = "ark-std" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" -dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", - "synstructure", -] - -[[package]] -name = "asn1-rs-impl" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", + "num-traits", + "rand 0.8.5", ] [[package]] -name = "assert-json-diff" -version = "2.0.2" +name = "array-util" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +checksum = "7e509844de8f09b90a2c3444684a2b6695f4071360e13d2fda0af9f749cc2ed6" dependencies = [ - "serde", - "serde_json", + "arrayvec", ] [[package]] -name = "assert_matches" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" - -[[package]] -name = "ast_node" -version = "0.9.5" +name = "arrayvec" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c09c69dffe06d222d072c878c3afe86eee2179806f20503faec97250268b4c24" -dependencies = [ - "pmutil", - "proc-macro2 1.0.79", - "quote 1.0.35", - "swc_macros_common", - "syn 2.0.52", -] +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "async-channel" -version = "1.9.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", - "event-listener", - "futures-core", -] - -[[package]] -name = "async-compression" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c" -dependencies = [ - "brotli", - "flate2", + "event-listener-strategy", "futures-core", - "memchr", "pin-project-lite", - "tokio 1.36.0", ] [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -976,24 +912,48 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = 
"0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2", + "quote", + "syn 2.0.79", ] +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "auto_enums" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "459b77b7e855f875fd15f101064825cd79eb83185a961d66e6298560126facfb" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "derive_utils", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] @@ -1002,22 +962,22 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-config" -version = "1.1.8" +version = "1.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4084d18094aec9f79d509f4cb6ccf6b613c5037e32f32e74312e52b836e366" +checksum = "7198e6f03240fdceba36656d8be440297b6b82270325908c7381f37d826a74f6" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1031,23 +991,23 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.5.0", - "fastrand 2.0.1", + "bytes", + "fastrand", "hex", "http 0.2.12", - "hyper 0.14.28", - "ring 0.17.8", + "ring", "time", - "tokio 1.36.0", + "tokio", "tracing", + "url", "zeroize", ] [[package]] name = "aws-credential-types" -version = "1.1.8" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa8587ae17c8e967e4b05a62d495be2fb7701bec52a97f7acfe8a29f938384c8" +checksum = "60e8f6b615cb5fc60a98132268508ad104310f0cfb25a1c22eee76efdf9154da" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -1057,35 +1017,37 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.1.8" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13dc54b4b49f8288532334bba8f87386a40571c47c37b1304979b556dc613c8" +checksum = "a10d5c055aa540164d9561a0e2e74ad30f0dcf7393c3a92f6733ddf9c5762468" dependencies = [ "aws-credential-types", "aws-sigv4", "aws-smithy-async", "aws-smithy-eventstream", "aws-smithy-http", + "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.5.0", - "fastrand 2.0.1", + "bytes", + "fastrand", "http 0.2.12", "http-body 0.4.6", + "once_cell", "percent-encoding", "pin-project-lite", "tracing", - "uuid 1.7.0", + "uuid 1.10.0", ] [[package]] name = "aws-sdk-s3" -version = "1.18.0" +version = "1.54.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "be7167516f69aff3acca64e47c993336105e62f008067d2695324dfa5cbfdba6" +checksum = "e2f2a62020f3e06f9b352b2a23547f6e1d110b6bf1e18a6b588ae36114eaf6e2" dependencies = [ - "ahash", + "ahash 0.8.11", "aws-credential-types", "aws-runtime", "aws-sigv4", @@ -1099,8 +1061,8 @@ dependencies = [ "aws-smithy-types", "aws-smithy-xml", "aws-types", - "bytes 1.5.0", - "fastrand 2.0.1", + "bytes", + "fastrand", "hex", "hmac", "http 0.2.12", @@ -1116,9 +1078,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.16.0" +version = "1.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6729c96a2bc5acdbc0d6f406415678c24de30a9999f33084a34e64fc415cc365" +checksum = "e33ae899566f3d395cbf42858e433930682cc9c1889fa89318896082fef45efb" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1129,7 +1091,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.5.0", + "bytes", "http 0.2.12", "once_cell", "regex-lite", @@ -1138,9 +1100,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.16.0" +version = "1.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccdd38f35f089c16fe0641cda34f2d06e3ab7b99a884407bce350a9fa70b1a9" +checksum = "f39c09e199ebd96b9f860b0fce4b6625f211e064ad7c8693b72ecf7ef03881e0" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1151,7 +1113,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "bytes 1.5.0", + "bytes", "http 0.2.12", "once_cell", "regex-lite", @@ -1160,9 +1122,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.16.0" +version = "1.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e8064892a3c08b25b60fe3abda7ff5afa74efee500572cae65122ba5afd0d" +checksum = "3d95f93a98130389eb6233b9d615249e543f6c24a68ca1f109af9ca5164a8765" dependencies = [ "aws-credential-types", 
"aws-runtime", @@ -1183,16 +1145,16 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.2.0" +version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d6f29688a4be9895c0ba8bef861ad0c0dac5c15e9618b9b7a6c233990fc263" +checksum = "cc8db6904450bafe7473c6ca9123f88cc11089e41a025408f992db4e22d3be68" dependencies = [ "aws-credential-types", "aws-smithy-eventstream", "aws-smithy-http", "aws-smithy-runtime-api", "aws-smithy-types", - "bytes 1.5.0", + "bytes", "crypto-bigint 0.5.5", "form_urlencoded", "hex", @@ -1200,9 +1162,9 @@ dependencies = [ "http 0.2.12", "http 1.1.0", "once_cell", - "p256 0.11.1", + "p256", "percent-encoding", - "ring 0.17.8", + "ring", "sha2", "subtle", "time", @@ -1212,24 +1174,24 @@ dependencies = [ [[package]] name = "aws-smithy-async" -version = "1.1.8" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26ea8fa03025b2face2b3038a63525a10891e3d8829901d502e5384a0d8cd46" +checksum = "62220bc6e97f946ddd51b5f1361f78996e704677afc518a4ff66b7a72ea1378c" dependencies = [ "futures-util", "pin-project-lite", - "tokio 1.36.0", + "tokio", ] [[package]] name = "aws-smithy-checksums" -version = "0.60.7" +version = "0.60.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fa43bc04a6b2441968faeab56e68da3812f978a670a5db32accbdcafddd12f" +checksum = "598b1689d001c4d4dc3cb386adb07d37786783aee3ac4b324bcadac116bf3d23" dependencies = [ "aws-smithy-http", "aws-smithy-types", - "bytes 1.5.0", + "bytes", "crc32c", "crc32fast", "hex", @@ -1244,25 +1206,25 @@ dependencies = [ [[package]] name = "aws-smithy-eventstream" -version = "0.60.4" +version = "0.60.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6363078f927f612b970edf9d1903ef5cef9a64d1e8423525ebb1f0a1633c858" +checksum = "cef7d0a272725f87e51ba2bf89f8c21e4df61b9e49ae1ac367a6d69916ef7c90" dependencies = [ "aws-smithy-types", - "bytes 1.5.0", + "bytes", 
"crc32fast", ] [[package]] name = "aws-smithy-http" -version = "0.60.7" +version = "0.60.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f10fa66956f01540051b0aa7ad54574640f748f9839e843442d99b970d3aff9" +checksum = "5c8bc3e8fdc6b8d07d976e301c02fe553f72a39b7a9fea820e023268467d7ab6" dependencies = [ "aws-smithy-eventstream", "aws-smithy-runtime-api", "aws-smithy-types", - "bytes 1.5.0", + "bytes", "bytes-utils", "futures-core", "http 0.2.12", @@ -1295,58 +1257,63 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.1.8" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec81002d883e5a7fd2bb063d6fb51c4999eb55d404f4fff3dd878bf4733b9f01" +checksum = "d1ce695746394772e7000b39fe073095db6d45a862d0767dd5ad0ac0d7f8eb87" dependencies = [ "aws-smithy-async", "aws-smithy-http", "aws-smithy-runtime-api", "aws-smithy-types", - "bytes 1.5.0", - "fastrand 2.0.1", - "h2", + "bytes", + "fastrand", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.28", - "hyper-rustls", + "http-body 1.0.1", + "httparse", + "hyper 0.14.30", + "hyper-rustls 0.24.2", "once_cell", "pin-project-lite", "pin-utils", - "rustls", - "tokio 1.36.0", + "rustls 0.21.12", + "tokio", "tracing", ] [[package]] name = "aws-smithy-runtime-api" -version = "1.2.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9acb931e0adaf5132de878f1398d83f8677f90ba70f01f65ff87f6d7244be1c5" +checksum = "e086682a53d3aa241192aa110fa8dfce98f2f5ac2ead0de84d41582c7e8fdb96" dependencies = [ "aws-smithy-async", "aws-smithy-types", - "bytes 1.5.0", + "bytes", "http 0.2.12", "http 1.1.0", "pin-project-lite", - "tokio 1.36.0", + "tokio", "tracing", "zeroize", ] [[package]] name = "aws-smithy-types" -version = "1.1.8" +version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abe14dceea1e70101d38fbf2a99e6a34159477c0fb95e68e05c66bd7ae4c3729" +checksum 
= "147100a7bea70fa20ef224a6bad700358305f5dc0f84649c53769761395b355b" dependencies = [ - "base64-simd 0.8.0", - "bytes 1.5.0", + "base64-simd", + "bytes", "bytes-utils", "futures-core", "http 0.2.12", + "http 1.1.0", "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", "itoa", "num-integer", "pin-project-lite", @@ -1354,4895 +1321,2828 @@ dependencies = [ "ryu", "serde", "time", - "tokio 1.36.0", + "tokio", "tokio-util", ] [[package]] name = "aws-smithy-xml" -version = "0.60.7" +version = "0.60.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "872c68cf019c0e4afc5de7753c4f7288ce4b71663212771bf5e4542eb9346ca9" +checksum = "ab0b0166827aa700d3dc519f72f8b3a91c35d0b8d042dc5d643a91e6f80648fc" dependencies = [ "xmlparser", ] -[[package]] -name = "aws-types" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dbf2f3da841a8930f159163175cf6a3d16ddde517c1b0fba7aa776822800f40" -dependencies = [ - "aws-credential-types", - "aws-smithy-async", - "aws-smithy-runtime-api", - "aws-smithy-types", - "http 0.2.12", - "rustc_version 0.4.0", - "tracing", -] - -[[package]] -name = "axum" -version = "0.6.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" -dependencies = [ - "async-trait", - "axum-core", - "bitflags 1.3.2", - "bytes 1.5.0", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", - "itoa", - "matchit", - "memchr", - "mime", - "percent-encoding", - "pin-project-lite", - "rustversion", - "serde", - "sync_wrapper", - "tower", - "tower-layer", - "tower-service", -] - -[[package]] -name = "axum-core" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" -dependencies = [ - "async-trait", - "bytes 1.5.0", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "mime", - 
"rustversion", - "tower-layer", - "tower-service", -] - -[[package]] -name = "backoff" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" -dependencies = [ - "getrandom 0.2.12", - "instant", - "rand 0.8.5", -] - -[[package]] -name = "backtrace" -version = "0.3.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" -dependencies = [ - "addr2line", - "cc", - "cfg-if 1.0.0", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - -[[package]] -name = "base16ct" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" - -[[package]] -name = "base64" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - -[[package]] -name = "base64-simd" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "781dd20c3aff0bd194fe7d2a977dd92f21c173891f3a03b677359e5fa457e5d5" -dependencies = [ - "simd-abstraction", -] - -[[package]] -name = "base64-simd" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" -dependencies = [ - "outref 0.5.1", - "vsimd", -] - -[[package]] -name = "base64ct" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" - -[[package]] -name = "better_scoped_tls" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "794edcc9b3fb07bb4aecaa11f093fd45663b4feadb782d68303a2268bc2701de" -dependencies = [ - "scoped-tls", -] - -[[package]] -name = "bigdecimal" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" -dependencies = [ - "num-bigint", - "num-integer", - "num-traits", - "serde", -] - -[[package]] -name = "bindgen" -version = "0.63.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36d860121800b2a9a94f9b5604b332d5cffb234ce17609ea479d723dbc9d3885" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "peeking_take_while", - "proc-macro2 1.0.79", - "quote 1.0.35", - "regex", - "rustc-hash", - "shlex", - "syn 1.0.109", -] - -[[package]] -name = "bit-vec" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" - -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "block-padding" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" -dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "bloomfilter" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b64d54e47a7f4fd723f082e8f11429f3df6ba8adaeca355a76556f9f0602bbcf" -dependencies = [ - "bit-vec", - "getrandom 0.2.12", - "siphasher 1.0.0", -] - -[[package]] -name = "bollard-stubs" -version = "1.42.0-rc.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed59b5c00048f48d7af971b71f800fdf23e858844a6f9e4d32ca72e9399e7864" -dependencies = [ - "serde", - "serde_with 1.14.0", -] - -[[package]] -name = "brotli" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "516074a47ef4bce09577a3b379392300159ce5b1ba2e501ff1c819950066100f" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "2.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", -] - -[[package]] -name = "bson" -version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce21468c1c9c154a85696bb25c20582511438edb6ad67f846ba1378ffdd80222" -dependencies = [ - "ahash", - "base64 0.13.1", - "bitvec", - "hex", - "indexmap 
2.2.5", - "js-sys", - "once_cell", - "rand 0.8.5", - "serde", - "serde_bytes", - "serde_json", - "time", - "uuid 1.7.0", -] - -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "lazy_static", - "memchr", - "regex-automata 0.1.10", -] - -[[package]] -name = "bumpalo" -version = "3.15.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" - -[[package]] -name = "byte-slice-cast" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" - -[[package]] -name = "byte-unit" -version = "4.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da78b32057b8fdfc352504708feeba7216dcd65a2c9ab02978cbd288d1279b6c" -dependencies = [ - "serde", - "utf8-width", -] - -[[package]] -name = "bytecount" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytes" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" -dependencies = [ - "byteorder", - "iovec", -] - -[[package]] -name = "bytes" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" - -[[package]] -name = "bytes-utils" -version = "0.1.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" -dependencies = [ - "bytes 1.5.0", - "either", -] - -[[package]] -name = "bytesize" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" -dependencies = [ - "serde", -] - -[[package]] -name = "cache_control" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bf2a5fb3207c12b5d208ebc145f967fea5cac41a021c37417ccc31ba40f39ee" - -[[package]] -name = "cbc" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" -dependencies = [ - "cipher", -] - -[[package]] -name = "cc" -version = "1.0.90" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" - -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom 7.1.3", -] - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "cfg_aliases" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" - -[[package]] -name = "chrono" -version = "0.4.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "num-traits", - "serde", - "windows-targets 0.52.4", -] - -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", -] - -[[package]] -name = "clang-sys" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" -dependencies = [ - "glob", - "libc", - "libloading 0.8.3", -] - -[[package]] -name = "clap" -version = "4.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b230ab84b0ffdf890d5a10abdbc8b83ae1c4918275daea1ab8801f71536b2651" -dependencies = [ - "clap_builder", - "clap_derive", -] - -[[package]] -name = "clap_builder" -version = "4.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim 0.11.0", - "unicase", - "unicode-width", -] - -[[package]] -name = "clap_derive" -version = "4.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" -dependencies = [ - "heck", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", -] - -[[package]] -name = "clap_lex" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] 
-name = "cmake" -version = "0.1.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" -dependencies = [ - "cc", -] - -[[package]] -name = "colorchoice" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" - -[[package]] -name = "colored" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" -dependencies = [ - "lazy_static", - "windows-sys 0.48.0", -] - -[[package]] -name = "combine" -version = "4.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" -dependencies = [ - "bytes 1.5.0", - "memchr", -] - -[[package]] -name = "concurrent-queue" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" -dependencies = [ - "crossbeam-utils 0.8.19", -] - -[[package]] -name = "console" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" -dependencies = [ - "encode_unicode", - "lazy_static", - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "console_static_text" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4be93df536dfbcbd39ff7c129635da089901116b88bfc29ec1acb9b56f8ff35" -dependencies = [ - "unicode-width", - "vte", -] - -[[package]] -name = "const-oid" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" - -[[package]] -name = "const-random" -version = "0.1.18" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" -dependencies = [ - "const-random-macro", -] - -[[package]] -name = "const-random-macro" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" -dependencies = [ - "getrandom 0.2.12", - "once_cell", - "tiny-keccak", -] - -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" - -[[package]] -name = "cpufeatures" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" -dependencies = [ - "libc", -] - -[[package]] -name = "crc32c" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89254598aa9b9fa608de44b3ae54c810f0f06d755e24c50177f1f8f31ff50ce2" -dependencies = [ - "rustc_version 0.4.0", -] - -[[package]] -name = "crc32fast" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" -dependencies = [ - "crossbeam-utils 0.8.19", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array 0.14.7", - "rand_core 
0.6.4", - "subtle", - "zeroize", -] - -[[package]] -name = "crypto-bigint" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" -dependencies = [ - "generic-array 0.14.7", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array 0.14.7", - "rand_core 0.6.4", - "typenum", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher", -] - -[[package]] -name = "ctrlc" -version = "3.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345" -dependencies = [ - "nix 0.28.0", - "windows-sys 0.52.0", -] - -[[package]] -name = "curve25519-dalek" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" -dependencies = [ - "byteorder", - "digest 0.8.1", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "4.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "curve25519-dalek-derive", - "fiat-crypto", - "platforms", - "rustc_version 0.4.0", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek-derive" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" 
-dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", -] - -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core 0.14.4", - "darling_macro 0.14.4", -] - -[[package]] -name = "darling" -version = "0.20.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" -dependencies = [ - "darling_core 0.20.8", - "darling_macro 0.20.8", -] - -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2 1.0.79", - "quote 1.0.35", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_core" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2 1.0.79", - "quote 1.0.35", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_core" -version = "0.20.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2 1.0.79", - "quote 1.0.35", - "strsim 0.10.0", - "syn 2.0.52", -] - -[[package]] -name = "darling_macro" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core 0.13.4", - "quote 1.0.35", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core 0.14.4", - "quote 1.0.35", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.20.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" -dependencies = [ - "darling_core 0.20.8", - "quote 1.0.35", - "syn 2.0.52", -] - -[[package]] -name = "dashmap" -version = "5.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" -dependencies = [ - "cfg-if 1.0.0", - "hashbrown 0.14.3", - "lock_api 0.4.11", - "once_cell", - "parking_lot_core 0.9.9", -] - -[[package]] -name = "data-encoding" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" - -[[package]] -name = "data-url" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d7439c3735f405729d52c3fbbe4de140eaf938a1fe47d227c27f8254d4302a5" - -[[package]] -name = "data-url" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41b319d1b62ffbd002e057f36bebd1f42b9f97927c9577461d855f3513c4289f" - -[[package]] -name = "deadpool" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "421fe0f90f2ab22016f32a9881be5134fdd71c65298917084b0c7477cbc3856e" -dependencies = [ - "async-trait", - "deadpool-runtime", - "num_cpus", - "retain_mut", - "tokio 1.36.0", -] - -[[package]] -name = "deadpool-runtime" 
-version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63dfa964fe2a66f3fde91fc70b267fe193d822c7e603e2a675a49a7f46ad3f49" - -[[package]] -name = "debugid" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" -dependencies = [ - "serde", - "uuid 1.7.0", -] - -[[package]] -name = "deno-proc-macro-rules" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c65c2ffdafc1564565200967edc4851c7b55422d3913466688907efd05ea26f" -dependencies = [ - "deno-proc-macro-rules-macros", - "proc-macro2 1.0.79", - "syn 2.0.52", -] - -[[package]] -name = "deno-proc-macro-rules-macros" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3047b312b7451e3190865713a4dd6e1f821aed614ada219766ebc3024a690435" -dependencies = [ - "once_cell", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", -] - -[[package]] -name = "deno_ast" -version = "0.31.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da7b09db895527a94de1305455338926cd2a7003231ba589b7b7b57e8da344f2" -dependencies = [ - "anyhow", - "base64 0.13.1", - "deno_media_type", - "dprint-swc-ext", - "serde", - "swc_atoms", - "swc_common", - "swc_config", - "swc_config_macro", - "swc_ecma_ast", - "swc_ecma_codegen", - "swc_ecma_codegen_macros", - "swc_ecma_loader", - "swc_ecma_parser", - "swc_ecma_transforms_base", - "swc_ecma_transforms_classes", - "swc_ecma_transforms_macros", - "swc_ecma_transforms_proposal", - "swc_ecma_transforms_react", - "swc_ecma_transforms_typescript", - "swc_ecma_utils", - "swc_ecma_visit", - "swc_eq_ignore_macros", - "swc_macros_common", - "swc_visit", - "swc_visit_macros", - "text_lines", - "url", -] - -[[package]] -name = "deno_broadcast_channel" -version = "0.116.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "115656db52c2b16ec7c10f198a4d408465ee106845fa019b1a7d0666a787355e" -dependencies = [ - "async-trait", - "deno_core", - "tokio 1.36.0", - "uuid 1.7.0", -] - -[[package]] -name = "deno_cache" -version = "0.54.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f803db27a98fecac8bbd3d45093859eb85e9b09a345d585b924ca59cf5133822" -dependencies = [ - "async-trait", - "deno_core", - "rusqlite", - "serde", - "sha2", - "tokio 1.36.0", -] - -[[package]] -name = "deno_console" -version = "0.122.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "647238f8bb4f530e09eee7a3fc5fed872c955d90983d4f9834659a342b732327" -dependencies = [ - "deno_core", -] - -[[package]] -name = "deno_core" -version = "0.224.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d23e2333ee4155236c3c7be8ec5ad9586eed854a47fe3804924627ca5a476e8" -dependencies = [ - "anyhow", - "bytes 1.5.0", - "deno_ops", - "deno_unsync 0.3.2", - "futures 0.3.30", - "indexmap 2.2.5", - "libc", - "log", - "once_cell", - "parking_lot 0.12.1", - "pin-project", - "serde", - "serde_json", - "serde_v8 0.133.0", - "smallvec 1.13.1", - "sourcemap 7.1.1", - "tokio 1.36.0", - "url", - "v8", -] - -[[package]] -name = "deno_cron" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5025ffddb4b4910760e11efbe5a8f3603213df09aebf4330d38cf50b73d350c3" -dependencies = [ - "anyhow", - "async-trait", - "chrono", - "deno_core", - "deno_unsync 0.1.1", - "saffron", - "tokio 1.36.0", -] - -[[package]] -name = "deno_crypto" -version = "0.136.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80418450f159d98d474415eb6b883d48cff2bdcd8be6ab83106776ebd2da6309" -dependencies = [ - "aes", - "aes-gcm", - "aes-kw", - "base64 0.21.7", - "cbc", - "const-oid", - "ctr", - "curve25519-dalek 2.1.3", - "deno_core", - "deno_web", - "elliptic-curve 0.12.3", - "num-traits", - "once_cell", - 
"p256 0.11.1", - "p384 0.11.2", - "rand 0.8.5", - "ring 0.17.8", - "rsa", - "sec1 0.3.0", - "serde", - "serde_bytes", - "sha1", - "sha2", - "signature 1.6.4", - "spki 0.6.0", - "tokio 1.36.0", - "uuid 1.7.0", - "x25519-dalek", -] - -[[package]] -name = "deno_fetch" -version = "0.146.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f7cbc95975deb43f45145c64bc51bcd8de8d8eca8ca2da04fb17933b8bbd77b" -dependencies = [ - "bytes 1.5.0", - "data-url 0.3.0", - "deno_core", - "deno_tls", - "dyn-clone", - "http 0.2.12", - "reqwest", - "serde", - "tokio 1.36.0", - "tokio-util", -] - -[[package]] -name = "deno_ffi" -version = "0.109.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36d787e5d92873c80b33baaad0f57a5af0327c782afd39d78fa91e1226907ec9" -dependencies = [ - "deno_core", - "dlopen", - "dynasmrt", - "libffi", - "libffi-sys", - "serde", - "serde-value", - "serde_json", - "tokio 1.36.0", - "winapi 0.3.9", -] - -[[package]] -name = "deno_fs" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38360e6581070c251892201e3dde6c5f660d2595889dd67e8700bb08def1c375" -dependencies = [ - "async-trait", - "deno_core", - "deno_io", - "filetime", - "fs3", - "libc", - "log", - "nix 0.26.2", - "rand 0.8.5", - "serde", - "tokio 1.36.0", - "winapi 0.3.9", -] - -[[package]] -name = "deno_http" -version = "0.117.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d33e8881b83b88938b794142afa609bc7d2bd318291f4bcf75670b24b40e6cd3" -dependencies = [ - "async-compression", - "async-trait", - "base64 0.21.7", - "brotli", - "bytes 1.5.0", - "cache_control", - "deno_core", - "deno_net", - "deno_websocket", - "flate2", - "fly-accept-encoding", - "http 0.2.12", - "httparse", - "hyper 0.14.28", - "hyper 1.0.0-rc.4", - "memmem", - "mime", - "once_cell", - "percent-encoding", - "phf 0.10.1", - "pin-project", - "ring 0.17.8", - "scopeguard", - "serde", - "slab", - 
"smallvec 1.13.1", - "thiserror", - "tokio 1.36.0", - "tokio-util", -] - -[[package]] -name = "deno_io" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a04e44d715cc025554b6cf6d691a435ce4cbb85f21407b205e34d818b63b6d8" -dependencies = [ - "async-trait", - "deno_core", - "filetime", - "fs3", - "once_cell", - "tokio 1.36.0", - "winapi 0.3.9", -] - -[[package]] -name = "deno_kv" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d7e776533bbbda7f4b25a20c8516a538ea07c52e55ab03564487904679f8107" -dependencies = [ - "anyhow", - "async-trait", - "base64 0.21.7", - "chrono", - "deno_core", - "deno_fetch", - "deno_node", - "deno_tls", - "deno_unsync 0.1.1", - "denokv_proto", - "denokv_remote", - "denokv_sqlite", - "hex", - "log", - "num-bigint", - "prost", - "prost-build", - "rand 0.8.5", - "reqwest", - "rusqlite", - "serde", - "serde_json", - "termcolor", - "tokio 1.36.0", - "url", - "uuid 1.7.0", -] - -[[package]] -name = "deno_media_type" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a798670c20308e5770cc0775de821424ff9e85665b602928509c8c70430b3ee0" -dependencies = [ - "data-url 0.3.0", - "serde", - "url", -] - -[[package]] -name = "deno_napi" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1e390ac573b09d416dc23b755c7b07a2c28054c2ad5f59c149aaee547230ba8" -dependencies = [ - "deno_core", - "libloading 0.7.4", -] - -[[package]] -name = "deno_native_certs" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9c7c5a7f03be0fccb523765d76aa6758760f3163459ce777ba3979596db0998" -dependencies = [ - "dlopen", - "dlopen_derive", - "once_cell", - "rustls-native-certs", - "rustls-pemfile", -] - -[[package]] -name = "deno_net" -version = "0.114.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"43cc6e825d38f3407dac07b6db6db55f8a4e00302a4d6272b08c59c0408956d3" -dependencies = [ - "deno_core", - "deno_tls", - "enum-as-inner 0.5.1", - "log", - "pin-project", - "serde", - "socket2 0.5.6", - "tokio 1.36.0", - "trust-dns-proto 0.22.0", - "trust-dns-resolver 0.22.0", -] - -[[package]] -name = "deno_node" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d448d65ab1afcbfdf0b37e1feaac304989fc71e6647f9e3bbccd3398fe730e3" -dependencies = [ - "aead-gcm-stream", - "aes", - "brotli", - "bytes 1.5.0", - "cbc", - "data-encoding", - "deno_core", - "deno_fetch", - "deno_fs", - "deno_media_type", - "deno_net", - "deno_whoami", - "digest 0.10.7", - "dsa", - "ecb", - "elliptic-curve 0.13.8", - "errno 0.2.8", - "h2", - "hex", - "hkdf", - "http 0.2.12", - "idna 0.3.0", - "indexmap 2.2.5", - "lazy-regex", - "libc", - "libz-sys", - "md-5", - "md4", - "num-bigint", - "num-bigint-dig", - "num-integer", - "num-traits", - "once_cell", - "p224", - "p256 0.13.2", - "p384 0.13.0", - "path-clean", - "pbkdf2 0.12.2", - "rand 0.8.5", - "regex", - "reqwest", - "ring 0.17.8", - "ripemd", - "rsa", - "scrypt 0.11.0", - "secp256k1", - "serde", - "sha-1", - "sha2", - "signature 1.6.4", - "tokio 1.36.0", - "typenum", - "url", - "winapi 0.3.9", - "x25519-dalek", - "x509-parser", -] - -[[package]] -name = "deno_ops" -version = "0.100.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "466fad92ea239f85cad8de032660c9b8d6fe361260ce3169b5893592902fe2f4" -dependencies = [ - "deno-proc-macro-rules", - "lazy-regex", - "once_cell", - "pmutil", - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.79", - "quote 1.0.35", - "regex", - "strum", - "strum_macros", - "syn 2.0.52", - "thiserror", -] - -[[package]] -name = "deno_runtime" -version = "0.130.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525fcb938c4daa67ffcd4d299f2cd3202012d5beb2c93cc81efd6b19d2b57c6a" -dependencies = [ - "console_static_text", 
- "deno_ast", - "deno_broadcast_channel", - "deno_cache", - "deno_console", - "deno_core", - "deno_cron", - "deno_crypto", - "deno_fetch", - "deno_ffi", - "deno_fs", - "deno_http", - "deno_io", - "deno_kv", - "deno_napi", - "deno_net", - "deno_node", - "deno_tls", - "deno_url", - "deno_web", - "deno_webidl", - "deno_websocket", - "deno_webstorage", - "dlopen", - "encoding_rs", - "fastwebsockets", - "filetime", - "fs3", - "fwdansi", - "http 0.2.12", - "hyper 0.14.28", - "libc", - "log", - "netif", - "nix 0.26.2", - "notify", - "ntapi", - "once_cell", - "regex", - "ring 0.17.8", - "serde", - "signal-hook-registry", - "termcolor", - "tokio 1.36.0", - "tokio-metrics", - "uuid 1.7.0", - "which", - "winapi 0.3.9", - "winres", -] - -[[package]] -name = "deno_tls" -version = "0.109.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e42f3550180526960e17aaa84d160f24c8d18a7768e3e3727f109f6727b1837" -dependencies = [ - "deno_core", - "deno_native_certs", - "once_cell", - "rustls", - "rustls-pemfile", - "rustls-webpki", - "serde", - "webpki-roots", -] - -[[package]] -name = "deno_unsync" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac0984205f25e71ddd1be603d76e70255953c12ff864707359ab195d26dfc7b3" -dependencies = [ - "tokio 1.36.0", -] - -[[package]] -name = "deno_unsync" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30dff7e03584dbae188dae96a0f1876740054809b2ad0cf7c9fc5d361f20e739" -dependencies = [ - "tokio 1.36.0", -] - -[[package]] -name = "deno_url" -version = "0.122.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba15c1b31ee4b084dca4ccd2a6d2b7f9fae0140d19358cea0713bf93053e48e" -dependencies = [ - "deno_core", - "serde", - "urlpattern", -] - -[[package]] -name = "deno_web" -version = "0.153.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f39563ebee10317fc886a90b2eb5c1dd95ce3f344fc0aace7b02ed3caee22294" -dependencies = [ - "async-trait", - "base64-simd 0.8.0", - "bytes 1.5.0", - "deno_core", - "encoding_rs", - "flate2", - "futures 0.3.30", - "serde", - "tokio 1.36.0", - "uuid 1.7.0", - "windows-sys 0.48.0", -] - -[[package]] -name = "deno_webidl" -version = "0.122.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0229857656c35f315f30a53cdb8527a5f47cf08c546f8db03255a7876771f670" -dependencies = [ - "deno_core", -] - -[[package]] -name = "deno_websocket" -version = "0.127.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bc834768b450cd0b70a45abec814fe40a24073409deb419048ae7536910e64" -dependencies = [ - "bytes 1.5.0", - "deno_core", - "deno_net", - "deno_tls", - "fastwebsockets", - "h2", - "http 0.2.12", - "hyper 0.14.28", - "once_cell", - "rustls-tokio-stream", - "serde", - "tokio 1.36.0", -] - -[[package]] -name = "deno_webstorage" -version = "0.117.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fab67d15081e3a06435911aa49b075d56be25ec64e6abbe228f1f04e79b9ad6" -dependencies = [ - "deno_core", - "deno_web", - "rusqlite", - "serde", -] - -[[package]] -name = "deno_whoami" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e75e4caa92b98a27f09c671d1399aee0f5970aa491b9a598523aac000a2192e3" -dependencies = [ - "libc", - "whoami", -] - -[[package]] -name = "denokv_proto" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8952fb8c38c1dcd796d49b00030afb74aa184160ae86817b72a32a994c8e16f0" -dependencies = [ - "anyhow", - "async-trait", - "chrono", - "num-bigint", - "prost", - "prost-build", - "serde", - "uuid 1.7.0", -] - -[[package]] -name = "denokv_remote" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"edfc8447324d783b01e215bd5040ff9149c34d9715c7b7b5080dd648ebf1148a" -dependencies = [ - "anyhow", - "async-trait", - "bytes 1.5.0", - "chrono", - "denokv_proto", - "log", - "prost", - "rand 0.8.5", - "reqwest", - "serde", - "serde_json", - "tokio 1.36.0", - "url", - "uuid 1.7.0", -] - -[[package]] -name = "denokv_sqlite" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ec76b691ff069f14e56e3e053c2b2163540b27e4b60179f2b120064a7e4960d" -dependencies = [ - "anyhow", - "async-trait", - "chrono", - "denokv_proto", - "futures 0.3.30", - "log", - "num-bigint", - "rand 0.8.5", - "rusqlite", - "serde_json", - "tokio 1.36.0", - "uuid 1.7.0", -] - -[[package]] -name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid", - "pem-rfc7468 0.6.0", - "zeroize", -] - -[[package]] -name = "der" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" -dependencies = [ - "const-oid", - "pem-rfc7468 0.7.0", - "zeroize", -] - -[[package]] -name = "der-parser" -version = "8.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" -dependencies = [ - "asn1-rs", - "displaydoc", - "nom 7.1.3", - "num-bigint", - "num-traits", - "rusticata-macros", -] - -[[package]] -name = "deranged" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" -dependencies = [ - "powerfmt", - "serde", -] - -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies 
= [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", -] - -[[package]] -name = "derive_more" -version = "0.99.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" -dependencies = [ - "convert_case", - "proc-macro2 1.0.79", - "quote 1.0.35", - "rustc_version 0.4.0", - "syn 1.0.109", -] - -[[package]] -name = "difflib" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.4", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer", - "const-oid", - "crypto-common", - "subtle", -] - -[[package]] -name = "dirs" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "dirs-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" -dependencies = [ - "cfg-if 1.0.0", - "dirs-sys-next", -] - -[[package]] -name = "dirs-sys" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" -dependencies = [ - "libc", - "redox_users", - "winapi 0.3.9", -] - -[[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi 0.3.9", -] - -[[package]] -name = "displaydoc" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" -dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", -] - -[[package]] -name = "dlopen" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e80ad39f814a9abe68583cd50a2d45c8a67561c3361ab8da240587dda80937" -dependencies = [ - "dlopen_derive", - "lazy_static", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "dlopen_derive" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f236d9e1b1fbd81cea0f9cbdc8dcc7e8ebcd80e6659cd7cb2ad5f6c05946c581" -dependencies = [ - "libc", - "quote 0.6.13", - "syn 0.15.44", -] - -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" - -[[package]] -name = "dotenvy" -version = "0.15.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" - -[[package]] -name = "downcast" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" - -[[package]] -name = "dprint-swc-ext" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2f24ce6b89a06ae3eb08d5d4f88c05d0aef1fa58e2eba8dd92c97b84210c25" -dependencies = [ - "bumpalo", - "num-bigint", - "rustc-hash", - "swc_atoms", - "swc_common", - "swc_ecma_ast", - "swc_ecma_parser", - "text_lines", -] - -[[package]] -name = "dsa" -version = "0.6.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "48bc224a9084ad760195584ce5abb3c2c34a225fa312a128ad245a6b412b7689" -dependencies = [ - "digest 0.10.7", - "num-bigint-dig", - "num-traits", - "pkcs8 0.10.2", - "rfc6979 0.4.0", - "sha2", - "signature 2.2.0", - "zeroize", -] - -[[package]] -name = "dyn-clone" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" - -[[package]] -name = "dynasm" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add9a102807b524ec050363f09e06f1504214b0e1c7797f64261c891022dce8b" -dependencies = [ - "bitflags 1.3.2", - "byteorder", - "lazy_static", - "proc-macro-error", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", -] - -[[package]] -name = "dynasmrt" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64fba5a42bd76a17cad4bfa00de168ee1cbfa06a5e8ce992ae880218c05641a9" -dependencies = [ - "byteorder", - "dynasm", - "memmap2", -] - -[[package]] -name = "ecb" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a8bfa975b1aec2145850fcaa1c6fe269a16578c44705a532ae3edc92b8881c7" -dependencies = [ - "cipher", -] - -[[package]] -name = "ecdsa" -version = "0.14.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 0.3.1", - "signature 1.6.4", -] - -[[package]] -name = "ecdsa" -version = "0.16.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" -dependencies = [ - "der 0.7.8", - "digest 0.10.7", - "elliptic-curve 0.13.8", - "rfc6979 0.4.0", - "signature 2.2.0", - "spki 0.7.3", -] - -[[package]] -name = "either" 
-version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" - -[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", - "digest 0.10.7", - "ff 0.12.1", - "generic-array 0.14.7", - "group 0.12.1", - "hkdf", - "pem-rfc7468 0.6.0", - "pkcs8 0.9.0", - "rand_core 0.6.4", - "sec1 0.3.0", - "subtle", - "zeroize", -] - -[[package]] -name = "elliptic-curve" -version = "0.13.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" -dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.5", - "digest 0.10.7", - "ff 0.13.0", - "generic-array 0.14.7", - "group 0.13.0", - "hkdf", - "pem-rfc7468 0.7.0", - "pkcs8 0.10.2", - "rand_core 0.6.4", - "sec1 0.7.3", - "subtle", - "zeroize", -] - -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - -[[package]] -name = "encoding_rs" -version = "0.8.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "enum-as-inner" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" -dependencies = [ - "heck", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", -] - -[[package]] -name = "enum-as-inner" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" -dependencies = [ - "heck", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", -] - -[[package]] -name = "env_logger" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" -dependencies = [ - "log", - "regex", -] - -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] -name = "errno" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" -dependencies = [ - "errno-dragonfly", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "errno" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "error-stack" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27a72baa257b5e0e2de241967bc5ee8f855d6072351042688621081d66b2a76b" -dependencies = [ - "anyhow", - "rustc_version 0.4.0", -] - -[[package]] -name = "etcd-client" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4b0ea5ef6dc2388a4b1669fa32097249bc03a15417b97cb75e38afb309e4a89" -dependencies = [ - "http 0.2.12", - "prost", - "tokio 1.36.0", - "tokio-stream", - "tonic 0.9.2", - "tonic-build 0.9.2", - "tower", - "tower-service", -] - -[[package]] -name = 
"eth-keystore" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" -dependencies = [ - "aes", - "ctr", - "digest 0.10.7", - "hex", - "hmac", - "pbkdf2 0.11.0", - "rand 0.8.5", - "scrypt 0.10.0", - "serde", - "serde_json", - "sha2", - "sha3", - "thiserror", - "uuid 0.8.2", -] - -[[package]] -name = "ethbloom" -version = "0.13.0" +[[package]] +name = "aws-types" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" +checksum = "5221b91b3e441e6675310829fd8984801b772cb1546ef6c0e54dec9f1ac13fef" dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak", + "aws-credential-types", + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "rustc_version 0.4.1", + "tracing", ] [[package]] -name = "ethereum-types" -version = "0.14.1" +name = "axum" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" +checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ - "ethbloom", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 1.0.1", + "tower 0.5.1", + "tower-layer", + "tower-service", ] [[package]] -name = "event-listener" -version = "2.5.3" +name = "axum-core" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +checksum = 
"09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 1.0.1", + "tower-layer", + "tower-service", +] [[package]] -name = "exponential-backoff" -version = "1.2.0" +name = "backtrace" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47f78d87d930eee4b5686a2ab032de499c72bd1e954b84262bb03492a0f932cd" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ - "rand 0.8.5", + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] -name = "fallible-iterator" -version = "0.2.0" +name = "base16ct" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" [[package]] -name = "fallible-streaming-iterator" -version = "0.1.9" +name = "base16ct" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" [[package]] -name = "fastrand" -version = "1.9.0" +name = "base64" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] -name = "fastrand" -version = "2.0.1" +name = "base64" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" 
+checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] -name = "fastwebsockets" -version = "0.5.0" +name = "base64-simd" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17c35f166afb94b7f8e9449d0ad866daca111ba4053f3b1960bb480ca4382c63" +checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" dependencies = [ - "base64 0.21.7", - "hyper 0.14.28", - "pin-project", - "rand 0.8.5", - "sha1", - "simdutf8", - "thiserror", - "tokio 1.36.0", - "utf-8", + "outref", + "vsimd", ] [[package]] -name = "ff" -version = "0.12.1" +name = "base64ct" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] -name = "ff" -version = "0.13.0" +name = "bincode" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" dependencies = [ - "rand_core 0.6.4", - "subtle", + "serde", ] [[package]] -name = "fiat-crypto" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" - -[[package]] -name = "filetime" -version = "0.2.23" +name = "bit-set" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall 0.4.1", - "windows-sys 0.52.0", + "bit-vec", ] [[package]] -name = "finl_unicode" -version = "1.2.0" +name = 
"bit-vec" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" [[package]] -name = "fixed-hash" -version = "0.8.0" +name = "bitflags" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] -name = "fixedbitset" -version = "0.4.2" +name = "bitflags" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] -name = "flatbuffers" -version = "23.5.26" +name = "bitvec" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dac53e22462d78c16d64a1cd22371b54cc3fe94aa15e7886a2fa6e5d1ab8640" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ - "bitflags 1.3.2", - "rustc_version 0.4.0", + "funty", + "radium", + "tap", + "wyz", ] [[package]] -name = "flate2" -version = "1.0.28" +name = "block-buffer" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "crc32fast", - "libz-ng-sys", - "miniz_oxide", + "generic-array", ] [[package]] -name = "float-cmp" -version = "0.9.0" +name = "blst" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +checksum = "4378725facc195f1a538864863f6de233b500a8862747e7f165078a419d5e874" dependencies = [ - "num-traits", + "cc", + "glob", + "threadpool", + "zeroize", ] [[package]] -name = "fly-accept-encoding" -version = "0.2.0" +name = "bollard" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3afa7516fdcfd8e5e93a938f8fec857785ced190a1f62d842d1fe1ffbe22ba8" +checksum = "d41711ad46fda47cd701f6908e59d1bd6b9a2b7464c0d0aeab95c6d37096ff8a" dependencies = [ - "http 0.2.12", - "itertools", + "base64 0.22.1", + "bollard-stubs", + "bytes", + "futures-core", + "futures-util", + "hex", + "home", + "http 1.1.0", + "http-body-util", + "hyper 1.4.1", + "hyper-named-pipe", + "hyper-rustls 0.27.3", + "hyper-util", + "hyperlocal", + "log", + "pin-project-lite", + "rustls 0.23.14", + "rustls-native-certs 0.7.3", + "rustls-pemfile 2.2.0", + "rustls-pki-types", + "serde", + "serde_derive", + "serde_json", + "serde_repr", + "serde_urlencoded", "thiserror", + "tokio", + "tokio-util", + "tower-service", + "url", + "winapi", ] [[package]] -name = "fnv" -version = "1.0.7" +name = "bollard-stubs" +version = "1.45.0-rc.26.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +checksum = "6d7c5415e3a6bc6d3e99eff6268e488fd4ee25e7b28c10f08fa6760bd9de16e4" +dependencies = [ + "serde", + "serde_repr", + "serde_with", +] [[package]] -name = "foreign-types" -version = "0.3.2" +name = "borsh" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" dependencies = [ - "foreign-types-shared", + "borsh-derive", + "cfg_aliases", ] [[package]] -name = "foreign-types-shared" -version = "0.1.1" +name = "borsh-derive" 
+version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.79", + "syn_derive", +] [[package]] -name = "form_urlencoded" -version = "1.2.1" +name = "bumpalo" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" -dependencies = [ - "percent-encoding", -] +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] -name = "fragile" -version = "2.0.0" +name = "byte-slice-cast" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] -name = "from_variant" -version = "0.1.6" +name = "byte-unit" +version = "5.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03ec5dc38ee19078d84a692b1c41181ff9f94331c76cee66ff0208c770b5e54f" +checksum = "33ac19bdf0b2665407c39d82dbc937e951e7e2001609f0fb32edd0af45a2d63e" dependencies = [ - "pmutil", - "proc-macro2 1.0.79", - "swc_macros_common", - "syn 2.0.52", + "rust_decimal", + "serde", + "utf8-width", ] [[package]] -name = "fs3" -version = "0.5.0" +name = "bytecheck" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb17cf6ed704f72485332f6ab65257460c4f9f3083934cf402bf9f5b3b600a90" +checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" dependencies = [ - "libc", - "rustc_version 0.2.3", - "winapi 0.3.9", + "bytecheck_derive 0.6.12", + "ptr_meta 0.1.4", + "simdutf8", ] [[package]] -name = "fsevent-sys" -version = "4.1.0" +name = 
"bytecheck" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2" +checksum = "50c8f430744b23b54ad15161fcbc22d82a29b73eacbe425fea23ec822600bc6f" dependencies = [ - "libc", + "bytecheck_derive 0.8.0", + "ptr_meta 0.3.0", + "rancor", + "simdutf8", ] [[package]] -name = "fslock" -version = "0.1.8" +name = "bytecheck_derive" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57eafdd0c16f57161105ae1b98a1238f97645f2f588438b2949c99a2af9616bf" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ - "libc", - "winapi 0.3.9", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "fuchsia-cprng" -version = "0.1.1" +name = "bytecheck_derive" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" +checksum = "523363cbe1df49b68215efdf500b103ac3b0fb4836aed6d15689a076eadb8fff" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] [[package]] -name = "fuchsia-zircon" -version = "0.3.3" +name = "bytemuck" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags 1.3.2", - "fuchsia-zircon-sys", -] +checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" [[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" +name = "byteorder" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] -name = "funty" -version = "2.0.0" +name = "bytes" +version = "1.7.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +dependencies = [ + "serde", +] [[package]] -name = "futures" -version = "0.1.31" +name = "bytes-utils" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +dependencies = [ + "bytes", + "either", +] [[package]] -name = "futures" -version = "0.3.30" +name = "c-kzg" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", + "blst", + "cc", + "glob", + "hex", + "libc", + "once_cell", + "serde", ] [[package]] -name = "futures-channel" -version = "0.3.30" +name = "cc" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" dependencies = [ - "futures-core", - "futures-sink", + "jobserver", + "libc", + "shlex", ] [[package]] -name = "futures-core" -version = "0.3.30" +name = "cfg-if" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] -name = "futures-executor" -version = "0.3.30" +name = "cfg_aliases" +version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] -name = "futures-io" -version = "0.3.30" +name = "chrono" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "num-traits", + "serde", + "windows-targets 0.52.6", +] [[package]] -name = "futures-lite" -version = "1.13.0" +name = "cipher" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "fastrand 1.9.0", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", + "crypto-common", + "inout", ] [[package]] -name = "futures-macro" -version = "0.3.30" +name = "clap" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "clap_builder", + "clap_derive", ] [[package]] -name = "futures-sink" -version = "0.3.30" +name = "clap_builder" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" +dependencies = [ + "anstream", + "anstyle", + 
"clap_lex", + "strsim 0.11.1", + "unicase", + "unicode-width", +] [[package]] -name = "futures-task" -version = "0.3.30" +name = "clap_derive" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.79", +] [[package]] -name = "futures-timer" -version = "3.0.3" +name = "clap_lex" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] -name = "futures-util" -version = "0.3.30" +name = "cmsketch" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "aeccf706e341a5fcdc7f309af21f75eb4dd68fd7474e171bfe1a5570ea48307a" dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", + "paste", ] [[package]] -name = "fwdansi" -version = "1.1.0" +name = "colorchoice" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c1f5787fe85505d1f7777268db5103d80a7a374d2316a7ce262e57baf8f208" -dependencies = [ - "memchr", - "termcolor", -] +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] -name = "generic-array" -version = "0.12.4" +name = "concurrent-queue" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ - 
"typenum", + "crossbeam-utils", ] [[package]] -name = "generic-array" -version = "0.14.7" +name = "const-hex" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" dependencies = [ - "typenum", - "version_check", - "zeroize", + "cfg-if", + "cpufeatures", + "hex", + "proptest", + "serde", ] [[package]] -name = "getrandom" -version = "0.1.16" +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "core-foundation" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ - "cfg-if 1.0.0", + "core-foundation-sys", "libc", - "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] -name = "getrandom" -version = "0.2.12" +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ - "cfg-if 1.0.0", - "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "wasm-bindgen", ] [[package]] -name = "ghash" -version = "0.5.1" +name = "crc32c" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" +checksum = 
"3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47" dependencies = [ - "opaque-debug", - "polyval", + "rustc_version 0.4.1", ] [[package]] -name = "gimli" -version = "0.28.1" +name = "crc32fast" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +dependencies = [ + "cfg-if", +] [[package]] -name = "glob" -version = "0.3.1" +name = "crossbeam" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] [[package]] -name = "governor" -version = "0.6.3" +name = "crossbeam-channel" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ - "cfg-if 1.0.0", - "dashmap", - "futures 0.3.30", - "futures-timer", - "no-std-compat", - "nonzero_ext", - "parking_lot 0.12.1", - "portable-atomic", - "quanta", - "rand 0.8.5", - "smallvec 1.13.1", - "spinning_top", + "crossbeam-utils", ] [[package]] -name = "group" -version = "0.12.1" +name = "crossbeam-deque" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "ff 0.12.1", - "rand_core 0.6.4", - "subtle", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] -name = "group" -version = "0.13.0" +name = "crossbeam-epoch" 
+version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "ff 0.13.0", - "rand_core 0.6.4", - "subtle", + "crossbeam-utils", ] [[package]] -name = "h2" -version = "0.3.24" +name = "crossbeam-queue" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "bytes 1.5.0", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.12", - "indexmap 2.2.5", - "slab", - "tokio 1.36.0", - "tokio-util", - "tracing", + "crossbeam-utils", ] [[package]] -name = "half" -version = "2.4.0" +name = "crossbeam-utils" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e" -dependencies = [ - "cfg-if 1.0.0", - "crunchy", - "num-traits", -] +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] -name = "hashbrown" -version = "0.12.3" +name = "crunchy" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] -name = "hashbrown" -version = "0.13.2" +name = "crypto-bigint" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] [[package]] -name = "hashbrown" -version 
= "0.14.3" +name = "crypto-bigint" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ - "ahash", - "allocator-api2", + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", ] [[package]] -name = "hashlink" -version = "0.8.4" +name = "crypto-common" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "hashbrown 0.14.3", + "generic-array", + "typenum", ] [[package]] -name = "headers" -version = "0.3.9" +name = "ctor" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ - "base64 0.21.7", - "bytes 1.5.0", - "headers-core", - "http 0.2.12", - "httpdate", - "mime", - "sha1", + "quote", + "syn 1.0.109", ] [[package]] -name = "headers-core" -version = "0.2.0" +name = "ctr" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" dependencies = [ - "http 0.2.12", + "cipher", ] [[package]] -name = "heck" -version = "0.4.1" +name = "ctrlc" +version = "3.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" +dependencies = [ + "nix", + "windows-sys 0.59.0", +] [[package]] -name = "hermit-abi" -version = 
"0.3.9" +name = "darling" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core 0.14.4", + "darling_macro 0.14.4", +] [[package]] -name = "hex" -version = "0.4.3" +name = "darling" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "serde", + "darling_core 0.20.10", + "darling_macro 0.20.10", ] [[package]] -name = "hkdf" -version = "0.12.4" +name = "darling_core" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ - "hmac", + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 1.0.109", ] [[package]] -name = "hmac" -version = "0.12.1" +name = "darling_core" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ - "digest 0.10.7", + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.11.1", + "syn 2.0.79", ] [[package]] -name = "home" -version = "0.5.9" +name = "darling_macro" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ - "windows-sys 0.52.0", + "darling_core 0.14.4", + "quote", + "syn 
1.0.109", ] [[package]] -name = "hostname" -version = "0.3.1" +name = "darling_macro" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "libc", - "match_cfg", - "winapi 0.3.9", + "darling_core 0.20.10", + "quote", + "syn 2.0.79", ] [[package]] -name = "hstr" -version = "0.2.7" +name = "dashmap" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17fafeca18cf0927e23ea44d7a5189c10536279dfe9094e0dfa953053fbb5377" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ - "new_debug_unreachable", + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", "once_cell", - "phf 0.11.2", - "rustc-hash", - "smallvec 1.13.1", + "parking_lot_core", ] [[package]] -name = "http" -version = "0.2.12" +name = "der" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ - "bytes 1.5.0", - "fnv", - "itoa", + "const-oid", + "zeroize", ] [[package]] -name = "http" -version = "1.1.0" +name = "der" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ - "bytes 1.5.0", - "fnv", - "itoa", + "const-oid", + "zeroize", ] [[package]] -name = "http-body" -version = "0.4.6" +name = "deranged" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +checksum = 
"b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ - "bytes 1.5.0", - "http 0.2.12", - "pin-project-lite", + "powerfmt", + "serde", ] [[package]] -name = "http-body" -version = "1.0.0-rc.2" +name = "derivative" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "951dfc2e32ac02d67c90c0d65bd27009a635dc9b381a2cc7d284ab01e3a0150d" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "bytes 1.5.0", - "http 0.2.12", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "http-range-header" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" - -[[package]] -name = "http-types" -version = "2.12.0" +name = "derive_more" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" dependencies = [ - "anyhow", - "async-channel", - "base64 0.13.1", - "futures-lite", - "http 0.2.12", - "infer", - "pin-project-lite", - "rand 0.7.3", - "serde", - "serde_json", - "serde_qs", - "serde_urlencoded", - "url", + "derive_more-impl", ] [[package]] -name = "httparse" -version = "1.8.0" +name = "derive_more-impl" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", + "unicode-xid", +] [[package]] -name = "httpdate" -version = "1.0.3" +name = "derive_utils" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" 
+checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] [[package]] -name = "hyper" -version = "0.14.28" +name = "digest" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "bytes 1.5.0", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2 0.5.6", - "tokio 1.36.0", - "tower-service", - "tracing", - "want", + "generic-array", ] [[package]] -name = "hyper" -version = "1.0.0-rc.4" +name = "digest" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d280a71f348bcc670fc55b02b63c53a04ac0bf2daff2980795aeaf53edae10e6" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "bytes 1.5.0", - "futures-channel", - "futures-util", - "h2", - "http 0.2.12", - "http-body 1.0.0-rc.2", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "tokio 1.36.0", - "tracing", - "want", + "block-buffer", + "const-oid", + "crypto-common", + "subtle", ] [[package]] -name = "hyper-rustls" -version = "0.24.2" +name = "dirs" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.28", - "log", - "rustls", - "rustls-native-certs", - "tokio 1.36.0", - "tokio-rustls", + "dirs-sys", ] [[package]] -name = "hyper-timeout" +name = "dirs-sys" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" dependencies = [ - "hyper 0.14.28", - "pin-project-lite", - "tokio 1.36.0", - "tokio-io-timeout", + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", ] [[package]] -name = "iana-time-zone" -version = "0.1.60" +name = "docker_credential" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "31951f49556e34d90ed28342e1df7e1cb7a229c4cab0aecc627b5d91edd41d07" dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "wasm-bindgen", - "windows-core", + "base64 0.21.7", + "serde", + "serde_json", ] [[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" +name = "downcast-rs" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" [[package]] -name = "ident_case" -version = "1.0.1" +name = "dunce" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] -name = "idna" -version = "0.2.3" +name = "ecdsa" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", + "signature 1.6.4", ] [[package]] -name = "idna" -version 
= "0.3.0" +name = "ecdsa" +version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "der 0.7.9", + "digest 0.10.7", + "elliptic-curve 0.13.8", + "rfc6979 0.4.0", + "signature 2.2.0", + "spki 0.7.3", ] [[package]] -name = "idna" -version = "0.4.0" +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "elliptic-curve" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "base16ct 0.1.1", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest 0.10.7", + "ff 0.12.1", + "generic-array", + "group 0.12.1", + "pkcs8 0.9.0", + "rand_core 0.6.4", + "sec1 0.3.0", + "subtle", + "zeroize", ] [[package]] -name = "if_chain" -version = "1.0.2" +name = "elliptic-curve" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb56e1aa765b4b4f3aadfab769793b7087bb03a4ea4920644a6d238e2df5b9ed" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct 0.2.0", + "crypto-bigint 0.5.5", + "digest 0.10.7", + "ff 0.13.0", + "generic-array", + "group 0.13.0", + "pkcs8 0.10.2", + "rand_core 0.6.4", + "sec1 0.7.3", + "subtle", + "zeroize", +] [[package]] -name = "impl-codec" -version = "0.6.0" +name = "encoding_rs" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ - "parity-scale-codec", + "cfg-if", ] [[package]] -name = "impl-rlp" -version = "0.3.0" +name = "equivalent" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ - "rlp", + "libc", + "windows-sys 0.52.0", ] [[package]] -name = "impl-serde" -version = "0.4.0" +name = "error-stack" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" +checksum = "fe413319145d1063f080f27556fd30b1d70b01e2ba10c2a6e40d4be982ffc5d1" dependencies = [ - "serde", + "anyhow", + "rustc_version 0.4.1", ] [[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" +name = "etcd-client" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +checksum = "39bde3ce50a626efeb1caa9ab1083972d178bebb55ca627639c8ded507dfcbde" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", + "http 1.1.0", + "prost", + "tokio", + "tokio-stream", + "tonic", + "tonic-build", + "tower 0.4.13", + "tower-service", ] [[package]] -name = "indexmap" -version = "1.9.3" +name = "eth-keystore" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" 
dependencies = [ - "autocfg", - "hashbrown 0.12.3", + "aes", + "ctr", + "digest 0.10.7", + "hex", + "hmac", + "pbkdf2", + "rand 0.8.5", + "scrypt", "serde", + "serde_json", + "sha2", + "sha3", + "thiserror", + "uuid 0.8.2", ] [[package]] -name = "indexmap" -version = "2.2.5" +name = "ethbloom" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" +checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" dependencies = [ - "equivalent", - "hashbrown 0.14.3", - "serde", + "crunchy", + "fixed-hash", + "impl-rlp", + "impl-serde", + "tiny-keccak", ] [[package]] -name = "infer" -version = "0.2.3" +name = "ethereum-types" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" +checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" +dependencies = [ + "ethbloom", + "fixed-hash", + "impl-rlp", + "impl-serde", + "primitive-types", + "uint", +] [[package]] -name = "inotify" -version = "0.9.6" +name = "event-listener" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ - "bitflags 1.3.2", - "inotify-sys", - "libc", + "concurrent-queue", + "parking", + "pin-project-lite", ] [[package]] -name = "inotify-sys" -version = "0.1.5" +name = "event-listener-strategy" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "libc", + "event-listener", + "pin-project-lite", ] [[package]] -name = "inout" -version = "0.1.3" 
+name = "fastrace" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +checksum = "fe845ecd1e3dba36bd7a20ea3b46c81ec610d5a2ffe288160a7cc6a2051496a5" dependencies = [ - "block-padding", - "generic-array 0.14.7", + "fastrace-macro", + "minstant", + "once_cell", + "parking_lot", + "pin-project", + "rand 0.8.5", + "rtrb", ] [[package]] -name = "instant" -version = "0.1.12" +name = "fastrace-macro" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "a09bf248c7ec91a448701fa2c31750f78be6cbc3d5269dbb82a9f3945776d1f4" dependencies = [ - "cfg-if 1.0.0", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] -name = "integer-encoding" -version = "3.0.4" +name = "fastrand" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] -name = "iovec" -version = "0.1.4" +name = "fastrlp" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" dependencies = [ - "libc", + "arrayvec", + "auto_impl", + "bytes", ] [[package]] -name = "ipconfig" -version = "0.3.2" +name = "ff" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "socket2 0.5.6", - "widestring", - "windows-sys 0.48.0", - "winreg", + "rand_core 0.6.4", + "subtle", ] [[package]] -name = 
"ipnet" -version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" - -[[package]] -name = "is-macro" -version = "0.3.5" +name = "ff" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a85abdc13717906baccb5a1e435556ce0df215f242892f721dff62bf25288f" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "Inflector", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "rand_core 0.6.4", + "subtle", ] [[package]] -name = "itertools" -version = "0.10.5" +name = "fixed-hash" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ - "either", + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", ] [[package]] -name = "itoa" -version = "1.0.10" +name = "fixedbitset" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] -name = "jemalloc-sys" -version = "0.5.4+5.3.0-patched" +name = "flate2" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ - "cc", - "libc", + "crc32fast", + "miniz_oxide", ] [[package]] -name = "jemallocator" -version = "0.5.4" +name = "flume" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" +checksum = 
"55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" dependencies = [ - "jemalloc-sys", - "libc", + "futures-core", + "futures-sink", + "nanorand", + "spin", ] [[package]] -name = "js-sys" -version = "0.3.69" +name = "fnv" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" -dependencies = [ - "wasm-bindgen", -] +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] -name = "json-patch" -version = "1.2.0" +name = "foldhash" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ff1e1486799e3f64129f8ccad108b38290df9cd7015cd31bed17239f0789d6" -dependencies = [ - "serde", - "serde_json", - "thiserror", - "treediff", -] +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" [[package]] -name = "jsonpath_lib" -version = "0.3.0" +name = "foreign-types" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaa63191d68230cccb81c5aa23abd53ed64d83337cacbb25a7b8c7979523774f" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" dependencies = [ - "log", - "serde", - "serde_json", + "foreign-types-shared", ] [[package]] -name = "jsonwebtoken" -version = "8.3.0" +name = "foreign-types-shared" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" -dependencies = [ - "base64 0.21.7", - "pem", - "ring 0.16.20", - "serde", - "serde_json", - "simple_asn1", -] +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] -name = "k8s-openapi" -version = "0.18.0" +name = "form_urlencoded" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd990069640f9db34b3b0f7a1afc62a05ffaa3be9b66aa3c313f58346df7f788" +checksum 
= "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ - "base64 0.21.7", - "bytes 1.5.0", - "chrono", - "http 0.2.12", "percent-encoding", - "schemars", - "serde", - "serde-value", - "serde_json", - "url", -] - -[[package]] -name = "keccak" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" -dependencies = [ - "cpufeatures", ] [[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +name = "foyer" +version = "0.12.0-dev" +source = "git+https://github.com/foyer-rs/foyer.git?rev=d49c480#d49c4800e264f88c53ecca038b011af99a7c82da" dependencies = [ - "winapi 0.2.8", - "winapi-build", + "ahash 0.8.11", + "anyhow", + "fastrace", + "foyer-common", + "foyer-memory", + "foyer-storage", + "futures", + "madsim-tokio", + "pin-project", + "tracing", ] [[package]] -name = "kqueue" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7447f1ca1b7b563588a205fe93dea8df60fd981423a768bc1c0ded35ed147d0c" +name = "foyer-common" +version = "0.12.0-dev" +source = "git+https://github.com/foyer-rs/foyer.git?rev=d49c480#d49c4800e264f88c53ecca038b011af99a7c82da" dependencies = [ - "kqueue-sys", - "libc", + "bytes", + "cfg-if", + "crossbeam", + "fastrace", + "futures", + "hashbrown 0.14.5", + "itertools 0.13.0", + "madsim-tokio", + "metrics", + "parking_lot", + "pin-project", + "serde", ] [[package]] -name = "kqueue-sys" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" +name = "foyer-intrusive" +version = "0.12.0-dev" +source = "git+https://github.com/foyer-rs/foyer.git?rev=d49c480#d49c4800e264f88c53ecca038b011af99a7c82da" dependencies = [ - "bitflags 
1.3.2", - "libc", + "foyer-common", + "itertools 0.13.0", ] [[package]] -name = "kube" -version = "0.83.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f468b2fa6c5ef92117813238758f79e394c2d7688bd6faa3e77243f90260b0" +name = "foyer-memory" +version = "0.12.0-dev" +source = "git+https://github.com/foyer-rs/foyer.git?rev=d49c480#d49c4800e264f88c53ecca038b011af99a7c82da" dependencies = [ - "k8s-openapi", - "kube-client", - "kube-core", - "kube-derive", - "kube-runtime", + "ahash 0.8.11", + "bitflags 2.6.0", + "cmsketch", + "fastrace", + "foyer-common", + "foyer-intrusive", + "futures", + "hashbrown 0.14.5", + "itertools 0.13.0", + "madsim-tokio", + "parking_lot", + "pin-project", + "serde", + "tracing", ] [[package]] -name = "kube-client" -version = "0.83.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "337eb332d253036adc3247936248d0742c6c743f51eb38a684fd9b3b2878b27c" +name = "foyer-storage" +version = "0.12.0-dev" +source = "git+https://github.com/foyer-rs/foyer.git?rev=d49c480#d49c4800e264f88c53ecca038b011af99a7c82da" dependencies = [ - "base64 0.20.0", - "bytes 1.5.0", - "chrono", - "dirs-next", + "ahash 0.8.11", + "allocator-api2", + "anyhow", + "array-util", + "async-channel", + "auto_enums", + "bincode", + "bitflags 2.6.0", + "bytes", + "clap", "either", - "futures 0.3.30", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", - "hyper-rustls", - "hyper-timeout", - "jsonpath_lib", - "k8s-openapi", - "kube-core", - "pem", + "fastrace", + "flume", + "foyer-common", + "foyer-memory", + "fs4", + "futures", + "itertools 0.13.0", + "libc", + "lz4", + "madsim-tokio", + "ordered_hash_map", + "parking_lot", + "paste", "pin-project", - "rustls", - "rustls-pemfile", - "secrecy", + "rand 0.8.5", "serde", - "serde_json", - "serde_yaml", "thiserror", - "tokio 1.36.0", - "tokio-util", - "tower", - "tower-http", "tracing", + "twox-hash", + "zstd", ] [[package]] -name = "kube-core" -version = "0.83.0" +name = 
"fs4" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f924177ad71936cfe612641b45bb9879890696d3c026f0846423529f4fa449af" +checksum = "e8c6b3bd49c37d2aa3f3f2220233b29a7cd23f79d1fe70e5337d25fb390793de" dependencies = [ - "chrono", - "form_urlencoded", - "http 0.2.12", - "json-patch", - "k8s-openapi", - "once_cell", - "schemars", - "serde", - "serde_json", - "thiserror", + "rustix", + "windows-sys 0.52.0", ] [[package]] -name = "kube-derive" -version = "0.83.0" +name = "fuchsia-cprng" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ce7c7a14cf3fe567ca856de41db0d61394867675cfb0d65094c55f0fa2df2e0" -dependencies = [ - "darling 0.14.4", - "proc-macro2 1.0.79", - "quote 1.0.35", - "serde_json", - "syn 1.0.109", -] +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" [[package]] -name = "kube-runtime" -version = "0.83.0" +name = "funty" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d5e4d09df25250ffcb09df3f31105a5f49eb8f8a08da9b776ea5b6431ec476f" -dependencies = [ - "ahash", - "async-trait", - "backoff", - "derivative", - "futures 0.3.30", - "json-patch", - "k8s-openapi", - "kube-client", - "parking_lot 0.12.1", - "pin-project", - "serde", - "serde_json", - "smallvec 1.13.1", - "thiserror", - "tokio 1.36.0", - "tokio-util", - "tracing", -] +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] -name = "lazy-regex" -version = "3.1.0" +name = "futures" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d12be4595afdf58bd19e4a9f4e24187da2a66700786ff660a418e9059937a4c" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ - "lazy-regex-proc_macros", - "once_cell", - "regex", + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + 
"futures-task", + "futures-util", ] [[package]] -name = "lazy-regex-proc_macros" -version = "3.1.0" +name = "futures-channel" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bcd58e6c97a7fcbaffcdc95728b393b8d98933bfadad49ed4097845b57ef0b" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "regex", - "syn 2.0.52", + "futures-core", + "futures-sink", ] [[package]] -name = "lazy_static" -version = "1.4.0" +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ - "spin 0.5.2", + "futures-core", + "futures-task", + "futures-util", ] [[package]] -name = "lazycell" -version = "1.3.0" +name = "futures-io" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] -name = "lexical-core" -version = "0.8.5" +name = "futures-macro" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ - "lexical-parse-float", - "lexical-parse-integer", - "lexical-util", - "lexical-write-float", - "lexical-write-integer", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] -name = "lexical-parse-float" -version = "0.8.5" +name = 
"futures-sink" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f" -dependencies = [ - "lexical-parse-integer", - "lexical-util", - "static_assertions", -] +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] -name = "lexical-parse-integer" -version = "0.8.6" +name = "futures-task" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9" -dependencies = [ - "lexical-util", - "static_assertions", -] +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] -name = "lexical-util" -version = "0.8.5" +name = "futures-util" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ - "static_assertions", + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", ] [[package]] -name = "lexical-write-float" -version = "0.8.5" +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + +[[package]] +name = "generic-array" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ - "lexical-util", - "lexical-write-integer", - "static_assertions", + "typenum", + "version_check", + "zeroize", ] [[package]] -name = "lexical-write-integer" -version = "0.8.5" 
+name = "getrandom" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ - "lexical-util", - "static_assertions", + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", ] [[package]] -name = "libc" -version = "0.2.153" +name = "gimli" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] -name = "libffi" -version = "3.2.0" +name = "glob" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce826c243048e3d5cec441799724de52e2d42f820468431fc3fceee2341871e2" -dependencies = [ - "libc", - "libffi-sys", -] +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] -name = "libffi-sys" -version = "2.3.0" -source = "git+https://github.com/fracek/libffi-rs.git?rev=653781aa9b7a7ac1682e7f8cb405a2e90afc341d#653781aa9b7a7ac1682e7f8cb405a2e90afc341d" +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ - "cc", + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", ] [[package]] -name = "libloading" -version = "0.7.4" +name = "group" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "cfg-if 1.0.0", - "winapi 0.3.9", + "ff 0.13.0", + "rand_core 0.6.4", + "subtle", ] [[package]] -name = "libloading" -version = "0.8.3" +name = "h2" +version = "0.3.26" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ - "cfg-if 1.0.0", - "windows-targets 0.52.4", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap 2.6.0", + "slab", + "tokio", + "tokio-util", + "tracing", ] [[package]] -name = "libm" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" - -[[package]] -name = "libmdbx" -version = "0.1.12" +name = "h2" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4dadca98c9c806735d149872a4f6ecbdeb99290dea2630270e9415a6c2036e2" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ - "bitflags 1.3.2", - "byteorder", - "derive_more", - "indexmap 1.9.3", - "libc", - "mdbx-sys", - "parking_lot 0.12.1", - "thiserror", + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.1.0", + "indexmap 2.6.0", + "slab", + "tokio", + "tokio-util", + "tracing", ] [[package]] -name = "libredox" -version = "0.0.1" +name = "hashbrown" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "bitflags 2.4.2", - "libc", - "redox_syscall 0.4.1", + "ahash 0.7.8", ] [[package]] -name = "libsqlite3-sys" -version = "0.26.0" +name = "hashbrown" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - 
"cc", - "pkg-config", - "vcpkg", + "ahash 0.8.11", ] [[package]] -name = "libz-ng-sys" -version = "1.1.15" +name = "hashbrown" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6409efc61b12687963e602df8ecf70e8ddacf95bc6576bcf16e3ac6328083c5" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ - "cmake", - "libc", + "ahash 0.8.11", + "allocator-api2", + "serde", ] [[package]] -name = "libz-sys" -version = "1.1.15" +name = "hashbrown" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", + "foldhash", + "serde", ] [[package]] -name = "linked-hash-map" -version = "0.5.6" +name = "heck" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] -name = "linux-raw-sys" -version = "0.4.13" +name = "hermit-abi" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] -name = "lock_api" -version = "0.3.4" +name = "hex" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" dependencies = [ - "scopeguard", + "serde", ] [[package]] -name = "lock_api" -version = "0.4.11" +name = "hex-literal" +version = "0.4.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "hmac" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "autocfg", - "scopeguard", + "digest 0.10.7", ] [[package]] -name = "log" -version = "0.4.20" +name = "home" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] [[package]] -name = "lru" -version = "0.12.3" +name = "http" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ - "hashbrown 0.14.3", + "bytes", + "fnv", + "itoa", ] [[package]] -name = "lru-cache" -version = "0.1.2" +name = "http" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ - "linked-hash-map", + "bytes", + "fnv", + "itoa", ] [[package]] -name = "match_cfg" -version = "0.1.0" +name = "http-body" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] [[package]] -name = "matchers" -version 
= "0.1.0" +name = "http-body" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ - "regex-automata 0.1.10", + "bytes", + "http 1.1.0", ] [[package]] -name = "matches" -version = "0.1.10" +name = "http-body-util" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "pin-project-lite", +] [[package]] -name = "matchit" -version = "0.7.3" +name = "httparse" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] -name = "maybe-uninit" -version = "2.0.0" +name = "httpdate" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] -name = "md-5" -version = "0.10.6" +name = "hyper" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ - "cfg-if 1.0.0", - "digest 0.10.7", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + 
"want", ] [[package]] -name = "md4" -version = "0.10.2" +name = "hyper" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da5ac363534dce5fabf69949225e174fbf111a498bf0ff794c8ea1fba9f3dda" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ - "digest 0.10.7", + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", ] [[package]] -name = "mdbx-sys" -version = "0.12.3-0" +name = "hyper-named-pipe" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da620b13877ad39ac6543db8859fae54bf0dc29b395172fca813516b9e553a0" +checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278" dependencies = [ - "bindgen", - "cc", - "libc", + "hex", + "hyper 1.4.1", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", + "winapi", ] [[package]] -name = "memchr" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" - -[[package]] -name = "memmap2" -version = "0.5.10" +name = "hyper-rustls" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ - "libc", + "futures-util", + "http 0.2.12", + "hyper 0.14.30", + "log", + "rustls 0.21.12", + "rustls-native-certs 0.6.3", + "tokio", + "tokio-rustls 0.24.1", ] [[package]] -name = "memmem" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a64a92489e2744ce060c349162be1c5f33c6969234104dbd99ddb5feb08b8c15" - -[[package]] -name = "memoffset" -version = "0.5.6" +name = "hyper-rustls" +version = 
"0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ - "autocfg", + "futures-util", + "http 1.1.0", + "hyper 1.4.1", + "hyper-util", + "rustls 0.23.14", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.0", + "tower-service", + "webpki-roots 0.26.6", ] [[package]] -name = "memoffset" -version = "0.7.1" +name = "hyper-timeout" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "autocfg", + "hyper 1.4.1", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", ] [[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - -[[package]] -name = "mime_guess" -version = "2.0.4" +name = "hyper-tls" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ - "mime", - "unicase", + "bytes", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", ] [[package]] -name = "minimal-lexical" -version = "0.2.1" +name = "hyper-util" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + 
"hyper 1.4.1", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", +] [[package]] -name = "miniz_oxide" -version = "0.7.2" +name = "hyperlocal" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" dependencies = [ - "adler", + "hex", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", ] [[package]] -name = "mio" -version = "0.6.23" +name = "iana-time-zone" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc", - "log", - "miow", - "net2", - "slab", - "winapi 0.2.8", + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", ] [[package]] -name = "mio" -version = "0.8.11" +name = "iana-time-zone-haiku" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "libc", - "log", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", + "cc", ] [[package]] -name = "mio-uds" -version = "0.6.8" +name = "ident_case" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" -dependencies = [ - "iovec", - "libc", - "mio 0.6.23", -] +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] -name = 
"miow" -version = "0.2.2" +name = "idna" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "unicode-bidi", + "unicode-normalization", ] [[package]] -name = "mockall" -version = "0.11.4" +name = "impl-codec" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "cfg-if 1.0.0", - "downcast", - "fragile", - "lazy_static", - "mockall_derive", - "predicates", - "predicates-tree", + "parity-scale-codec", ] [[package]] -name = "mockall_derive" -version = "0.11.4" +name = "impl-rlp" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" +checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" dependencies = [ - "cfg-if 1.0.0", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", + "rlp", ] [[package]] -name = "mongodb" -version = "2.8.1" +name = "impl-serde" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de59562e5c71656c098d8e966641b31da87b89dc3dcb6e761d3b37dcdfa0cb72" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" dependencies = [ - "async-trait", - "base64 0.13.1", - "bitflags 1.3.2", - "bson", - "chrono", - "derivative", - "derive_more", - "futures-core", - "futures-executor", - "futures-io", - "futures-util", - "hex", - "hmac", - "lazy_static", - "md-5", - "pbkdf2 0.11.0", - "percent-encoding", - "rand 0.8.5", - "rustc_version_runtime", - "rustls", - "rustls-pemfile", "serde", - 
"serde_bytes", - "serde_with 1.14.0", - "sha-1", - "sha2", - "socket2 0.4.10", - "stringprep", - "strsim 0.10.0", - "take_mut", - "thiserror", - "tokio 1.36.0", - "tokio-rustls", - "tokio-util", - "trust-dns-proto 0.21.2", - "trust-dns-resolver 0.21.2", - "typed-builder", - "uuid 1.7.0", - "webpki-roots", ] [[package]] -name = "multer" -version = "2.1.0" +name = "impl-trait-for-tuples" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01acbdc23469fd8fe07ab135923371d5f5a422fbf9c522158677c8eb15bc51c2" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "bytes 1.5.0", - "encoding_rs", - "futures-util", - "http 0.2.12", - "httparse", - "log", - "memchr", - "mime", - "spin 0.9.8", - "version_check", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "multimap" -version = "0.8.3" +name = "indexmap" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] [[package]] -name = "native-tls" -version = "0.2.11" +name = "indexmap" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", + "equivalent", + "hashbrown 0.15.0", + "serde", ] [[package]] -name = "net2" -version = "0.2.39" +name = "inout" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" 
+checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", + "generic-array", ] [[package]] -name = "netif" -version = "0.1.6" +name = "ipnet" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29a01b9f018d6b7b277fef6c79fdbd9bf17bb2d1e298238055cafab49baa5ee" -dependencies = [ - "libc", - "winapi 0.3.9", -] +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] -name = "new_debug_unreachable" -version = "1.0.4" +name = "is_terminal_polyfill" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] -name = "nix" -version = "0.26.2" +name = "itertools" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ - "bitflags 1.3.2", - "cfg-if 1.0.0", - "libc", - "memoffset 0.7.1", - "pin-utils", - "static_assertions", + "either", ] [[package]] -name = "nix" -version = "0.28.0" +name = "itertools" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ - "bitflags 2.4.2", - "cfg-if 1.0.0", - "cfg_aliases", - "libc", + "either", ] [[package]] -name = "no-std-compat" -version = "0.4.1" +name = "itoa" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" +checksum = 
"49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] -name = "nom" -version = "5.1.3" +name = "jobserver" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08959a387a676302eebf4ddbcbc611da04285579f76f88ee0506c63b1a61dd4b" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ - "memchr", - "version_check", + "libc", ] [[package]] -name = "nom" -version = "7.1.3" +name = "js-sys" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ - "memchr", - "minimal-lexical", + "wasm-bindgen", ] [[package]] -name = "nonzero_ext" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" - -[[package]] -name = "normalize-line-endings" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" - -[[package]] -name = "notify" -version = "5.0.0" +name = "jsonwebtoken" +version = "9.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2c66da08abae1c024c01d635253e402341b4060a12e99b31c7594063bf490a" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" dependencies = [ - "bitflags 1.3.2", - "crossbeam-channel", - "filetime", - "fsevent-sys", - "inotify", - "kqueue", - "libc", - "mio 0.8.11", - "walkdir", - "winapi 0.3.9", + "base64 0.21.7", + "js-sys", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", ] [[package]] -name = "ntapi" -version = "0.4.1" +name = "k256" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ - "winapi 0.3.9", + "cfg-if", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", + "once_cell", + "sha2", ] [[package]] -name = "nu-ansi-term" -version = "0.46.0" +name = "keccak" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ - "overload", - "winapi 0.3.9", + "cpufeatures", ] [[package]] -name = "num" -version = "0.4.1" +name = "keccak-asm" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" dependencies = [ - "num-bigint", - "num-complex", - "num-integer", - "num-iter", - "num-rational", - "num-traits", + "digest 0.10.7", + "sha3-asm", ] [[package]] -name = "num-bigint" -version = "0.4.4" +name = "lambdaworks-crypto" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "bbc2a4da0d9e52ccfe6306801a112e81a8fc0c76aa3e4449fefeda7fef72bb34" dependencies = [ - "autocfg", - "num-integer", - "num-traits", - "rand 0.8.5", + "lambdaworks-math", "serde", + "sha2", + "sha3", ] [[package]] -name = "num-bigint-dig" -version = "0.8.4" +name = "lambdaworks-math" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +checksum = "d1bd2632acbd9957afc5aeec07ad39f078ae38656654043bf16e046fa2730e23" dependencies = [ - "byteorder", - "lazy_static", - "libm", - "num-integer", - "num-iter", - "num-traits", - 
"rand 0.8.5", "serde", - "smallvec 1.13.1", - "zeroize", + "serde_json", ] [[package]] -name = "num-complex" -version = "0.4.5" +name = "lazy_static" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6" -dependencies = [ - "num-traits", -] +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] -name = "num-conv" -version = "0.1.0" +name = "libc" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] -name = "num-integer" -version = "0.1.46" +name = "libm" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "libmimalloc-sys" +version = "0.1.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23aa6811d3bd4deb8a84dde645f943476d13b248d818edcf8ce0b2f37f036b44" dependencies = [ - "num-traits", + "cc", + "libc", ] [[package]] -name = "num-iter" -version = "0.1.44" +name = "libredox" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "autocfg", - "num-integer", - "num-traits", + "bitflags 2.6.0", + "libc", ] [[package]] -name = "num-rational" -version = "0.4.1" +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "lock_api" 
+version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", - "num-bigint", - "num-integer", - "num-traits", + "scopeguard", ] [[package]] -name = "num-traits" -version = "0.2.18" +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "lru" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" dependencies = [ - "autocfg", - "libm", + "hashbrown 0.14.5", ] [[package]] -name = "num_cpus" -version = "1.16.0" +name = "lz4" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "4d1febb2b4a79ddd1980eede06a8f7902197960aa0383ffcfdd62fe723036725" dependencies = [ - "hermit-abi", - "libc", + "lz4-sys", ] [[package]] -name = "object" -version = "0.32.2" +name = "lz4-sys" +version = "1.11.1+lz4-1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" dependencies = [ - "memchr", + "cc", + "libc", ] [[package]] -name = "octocrab" -version = "0.29.3" +name = "madsim" +version = "0.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "170c527fe1bf1d045e1f69a10dfd9b192327d53728b2c3481cf95b9dc2b42998" +checksum = "f88753ddf8d3cd43b9cf71a93626dd9aad3c24086a04420beb31922e1f856d02" dependencies = [ - "arc-swap", - 
"async-trait", - "base64 0.21.7", - "bytes 1.5.0", - "cfg-if 1.0.0", - "chrono", - "either", - "futures 0.3.30", + "ahash 0.8.11", + "async-channel", + "async-stream", + "async-task", + "bincode", + "bytes", + "downcast-rs", "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", - "hyper-rustls", - "hyper-timeout", - "jsonwebtoken", - "once_cell", - "percent-encoding", - "pin-project", - "secrecy", + "lazy_static", + "libc", + "madsim-macros", + "naive-timer", + "panic-message", + "rand 0.8.5", + "rand_xoshiro", + "rustversion", "serde", - "serde_json", - "serde_path_to_error", - "serde_urlencoded", - "snafu", - "tokio 1.36.0", - "tower", - "tower-http", + "spin", + "tokio", + "tokio-util", + "toml", "tracing", - "url", + "tracing-subscriber", ] [[package]] -name = "oid-registry" -version = "0.6.1" +name = "madsim-macros" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" +checksum = "f3d248e97b1a48826a12c3828d921e8548e714394bf17274dd0a93910dc946e1" dependencies = [ - "asn1-rs", + "darling 0.14.4", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "once_cell" -version = "1.19.0" +name = "madsim-tokio" +version = "0.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "7d3eb2acc57c82d21d699119b859e2df70a91dbdb84734885a1e72be83bdecb5" +dependencies = [ + "madsim", + "spin", + "tokio", +] [[package]] -name = "opaque-debug" -version = "0.3.1" +name = "matchers" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] [[package]] -name = "openssl" -version = "0.10.64" +name = "matchit" 
+version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" -dependencies = [ - "bitflags 2.4.2", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] -name = "openssl-macros" -version = "0.1.1" +name = "md-5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "cfg-if", + "digest 0.10.7", ] [[package]] -name = "openssl-probe" -version = "0.1.5" +name = "memchr" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] -name = "openssl-sys" -version = "0.9.101" +name = "memmap2" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" dependencies = [ - "cc", "libc", - "pkg-config", - "vcpkg", ] [[package]] -name = "opentelemetry" -version = "0.18.0" +name = "metrics" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d6c3d7288a106c0a363e4b0e8d308058d56902adefb16f4936f417ffef086e" +checksum = "884adb57038347dfbaf2d5065887b6cf4312330dc8e94bc30a1a839bd79d3261" dependencies = [ - "opentelemetry_api", - "opentelemetry_sdk", + "ahash 0.8.11", + "portable-atomic", ] [[package]] -name = "opentelemetry-otlp" -version = "0.11.0" +name = "mimalloc" 
+version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1c928609d087790fc936a1067bdc310ae702bdf3b090c3f281b713622c8bbde" +checksum = "68914350ae34959d83f732418d51e2427a794055d0b9529f48259ac07af65633" dependencies = [ - "async-trait", - "futures 0.3.30", - "futures-util", - "http 0.2.12", - "opentelemetry", - "opentelemetry-proto", - "prost", - "thiserror", - "tokio 1.36.0", - "tonic 0.8.3", + "libmimalloc-sys", ] [[package]] -name = "opentelemetry-proto" -version = "0.1.0" +name = "mime" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61a2f56df5574508dd86aaca016c917489e589ece4141df1b5e349af8d66c28" -dependencies = [ - "futures 0.3.30", - "futures-util", - "opentelemetry", - "prost", - "tonic 0.8.3", - "tonic-build 0.8.4", -] +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] -name = "opentelemetry_api" -version = "0.18.0" +name = "miniz_oxide" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ - "fnv", - "futures-channel", - "futures-util", - "indexmap 1.9.3", - "js-sys", - "once_cell", - "pin-project-lite", - "thiserror", + "adler2", ] [[package]] -name = "opentelemetry_sdk" -version = "0.18.0" +name = "minstant" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113" +checksum = "1fb9b5c752f145ac5046bccc3c4f62892e3c950c1d1eab80c5949cd68a2078db" dependencies = [ - "async-trait", - "crossbeam-channel", - "dashmap", - "fnv", - "futures-channel", - "futures-executor", - "futures-util", - "once_cell", - "opentelemetry_api", - "percent-encoding", - "rand 0.8.5", - "thiserror", - "tokio 1.36.0", - "tokio-stream", + "ctor", + 
"web-time", ] [[package]] -name = "ordered-float" -version = "2.10.1" +name = "mio" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ - "num-traits", + "hermit-abi", + "libc", + "wasi", + "windows-sys 0.52.0", ] [[package]] -name = "outref" -version = "0.1.0" +name = "multimap" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f222829ae9293e33a9f5e9f440c6760a3d450a64affe1846486b140db81c1f4" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" [[package]] -name = "outref" -version = "0.5.1" +name = "munge" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4030760ffd992bef45b0ae3f10ce1aba99e33464c90d14dd7c039884963ddc7a" +checksum = "64142d38c84badf60abf06ff9bd80ad2174306a5b11bd4706535090a30a419df" +dependencies = [ + "munge_macro", +] [[package]] -name = "overload" -version = "0.1.1" +name = "munge_macro" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +checksum = "1bb5c1d8184f13f7d0ccbeeca0def2f9a181bce2624302793005f5ca8aa62e5e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] [[package]] -name = "p224" -version = "0.13.2" +name = "naive-timer" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30c06436d66652bc2f01ade021592c80a2aad401570a18aa18b82e440d2b9aa1" -dependencies = [ - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", - "primeorder", - "sha2", -] +checksum = "034a0ad7deebf0c2abcf2435950a6666c3c15ea9d8fad0c0f48efa8a7f843fed" [[package]] -name = "p256" -version = "0.11.1" +name = "nanorand" +version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2", + "getrandom", ] [[package]] -name = "p256" -version = "0.13.2" +name = "native-tls" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", - "primeorder", - "sha2", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", ] [[package]] -name = "p384" -version = "0.11.2" +name = "nix" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2", + "bitflags 2.6.0", + "cfg-if", + "cfg_aliases", + "libc", ] [[package]] -name = "p384" -version = "0.13.0" +name = "nu-ansi-term" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70786f51bcc69f6a4c0360e063a4cac5419ef7c5cd5b3c99ad70f3be5ba79209" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", - "primeorder", - "sha2", + "overload", + "winapi", ] [[package]] -name = "papergrid" -version = "0.10.0" +name = "nu-ansi-term" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ccbe15f2b6db62f9a9871642746427e297b0ceb85f9a7f1ee5ff47d184d0c8" +checksum = 
"d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" dependencies = [ - "bytecount", - "fnv", - "unicode-width", + "windows-sys 0.52.0", ] [[package]] -name = "parity-scale-codec" -version = "3.6.9" +name = "num-bigint" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "arrayvec", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", + "num-integer", + "num-traits", ] [[package]] -name = "parity-scale-codec-derive" -version = "3.6.9" +name = "num-conv" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "proc-macro-crate 2.0.2", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", + "num-traits", ] [[package]] -name = "parking" -version = "2.2.0" +name = "num-traits" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] [[package]] -name = "parking_lot" -version = "0.9.0" +name = "num_cpus" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "lock_api 
0.3.4", - "parking_lot_core 0.6.3", - "rustc_version 0.2.3", + "hermit-abi", + "libc", ] [[package]] -name = "parking_lot" -version = "0.12.1" +name = "num_enum" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" dependencies = [ - "lock_api 0.4.11", - "parking_lot_core 0.9.9", + "num_enum_derive", ] [[package]] -name = "parking_lot_core" -version = "0.6.3" +name = "num_enum_derive" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66b810a62be75176a80873726630147a5ca780cd33921e0b5709033e66b0a" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ - "cfg-if 0.1.10", - "cloudabi", - "libc", - "redox_syscall 0.1.57", - "rustc_version 0.2.3", - "smallvec 0.6.14", - "winapi 0.3.9", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] -name = "parking_lot_core" -version = "0.9.9" +name = "num_threads" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" dependencies = [ - "cfg-if 1.0.0", "libc", - "redox_syscall 0.4.1", - "smallvec 1.13.1", - "windows-targets 0.48.5", ] [[package]] -name = "parquet" -version = "41.0.0" +name = "object" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6880c32d81884ac4441d9f4b027df8561be23b54f3ac1e62086fa42753dd3faa" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ - "ahash", - "arrow-array", - "arrow-buffer", - "arrow-cast", - "arrow-data", - "arrow-ipc", - "arrow-schema", - "arrow-select", - "base64 0.21.7", - "bytes 1.5.0", - "chrono", - "hashbrown 0.13.2", - "num", - 
"num-bigint", - "paste", - "seq-macro", - "thrift", - "twox-hash", + "memchr", ] [[package]] -name = "password-hash" -version = "0.5.0" +name = "once_cell" +version = "1.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] +name = "openssl" +version = "0.10.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ - "base64ct", - "rand_core 0.6.4", - "subtle", + "bitflags 2.6.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", ] [[package]] -name = "paste" -version = "1.0.14" +name = "openssl-macros" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] [[package]] -name = "path-clean" -version = "0.1.0" +name = "openssl-probe" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecba01bf2678719532c5e3059e0b5f0811273d94b397088b82e3bd0a78c78fdd" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] -name = "pathdiff" -version = "0.2.1" +name = "openssl-sys" +version = "0.9.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] [[package]] -name = "pbjson" -version = "0.5.1" +name = "opentelemetry" +version = "0.24.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "048f9ac93c1eab514f9470c4bc8d97ca2a0a236b84f45cc19d69a59fc11467f6" +checksum = "4c365a63eec4f55b7efeceb724f1336f26a9cf3427b70e59e2cd2a5b947fba96" dependencies = [ - "base64 0.13.1", - "serde", + "futures-core", + "futures-sink", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", ] [[package]] -name = "pbjson-build" -version = "0.5.1" +name = "opentelemetry-otlp" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdbb7b706f2afc610f3853550cdbbf6372fd324824a087806bd4480ea4996e24" +checksum = "6b925a602ffb916fb7421276b86756027b37ee708f9dce2dbdcc51739f07e727" dependencies = [ - "heck", - "itertools", + "async-trait", + "futures-core", + "http 1.1.0", + "opentelemetry", + "opentelemetry-proto", + "opentelemetry_sdk", "prost", - "prost-types", + "thiserror", + "tokio", + "tonic", ] [[package]] -name = "pbjson-types" -version = "0.5.1" +name = "opentelemetry-proto" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a88c8d87f99a4ac14325e7a4c24af190fca261956e3b82dd7ed67e77e6c7043" +checksum = "30ee9f20bff9c984511a02f082dc8ede839e4a9bf15cc2487c8d6fea5ad850d9" dependencies = [ - "bytes 1.5.0", - "chrono", - "pbjson", - "pbjson-build", + "opentelemetry", + "opentelemetry_sdk", "prost", - "prost-build", - "serde", + "tonic", ] [[package]] -name = "pbkdf2" -version = "0.11.0" +name = "opentelemetry_sdk" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +checksum = "692eac490ec80f24a17828d49b40b60f5aeaccdfe6a503f939713afd22bc28df" dependencies = [ - "digest 0.10.7", + "async-trait", + "futures-channel", + "futures-executor", + "futures-util", + "glob", + "once_cell", + "opentelemetry", + "percent-encoding", + "rand 0.8.5", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", ] [[package]] 
-name = "pbkdf2" -version = "0.12.2" +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "ordered_hash_map" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +checksum = "ab0e5f22bf6dd04abd854a8874247813a8fa2c8c1260eba6fbb150270ce7c176" dependencies = [ - "digest 0.10.7", - "hmac", + "hashbrown 0.13.2", ] [[package]] -name = "peeking_take_while" -version = "0.1.2" +name = "outref" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +checksum = "4030760ffd992bef45b0ae3f10ce1aba99e33464c90d14dd7c039884963ddc7a" [[package]] -name = "pem" -version = "1.1.1" +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "p256" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ - "base64 0.13.1", + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", + "sha2", ] [[package]] -name = "pem-rfc7468" -version = "0.6.0" +name = "panic-message" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "384e52fd8fbd4cbe3c317e8216260c21a0f9134de108cea8a4dd4e7e152c472d" + +[[package]] +name = "parity-scale-codec" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" +checksum = 
"306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ - "base64ct", + "arrayvec", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", ] [[package]] -name = "pem-rfc7468" -version = "0.7.0" +name = "parity-scale-codec-derive" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "base64ct", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "percent-encoding" -version = "2.3.0" +name = "parking" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] -name = "petgraph" -version = "0.6.4" +name = "parking_lot" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ - "fixedbitset", - "indexmap 2.2.5", + "lock_api", + "parking_lot_core", ] [[package]] -name = "phf" -version = "0.10.1" +name = "parking_lot_core" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabbf1ead8a5bcbc20f5f8b939ee3f5b0f6f281b6ad3468b84656b658b455259" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ - "phf_macros 0.10.0", - "phf_shared 0.10.0", - "proc-macro-hack", + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.52.6", ] [[package]] -name = "phf" -version = "0.11.2" +name = "parse-display" +version = "0.9.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "914a1c2265c98e2446911282c6ac86d8524f495792c38c5bd884f80499c7538a" dependencies = [ - "phf_macros 0.11.2", - "phf_shared 0.11.2", + "parse-display-derive", + "regex", + "regex-syntax 0.8.5", ] [[package]] -name = "phf_generator" -version = "0.10.0" +name = "parse-display-derive" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" +checksum = "2ae7800a4c974efd12df917266338e79a7a74415173caf7e70aa0a0707345281" dependencies = [ - "phf_shared 0.10.0", - "rand 0.8.5", + "proc-macro2", + "quote", + "regex", + "regex-syntax 0.8.5", + "structmeta", + "syn 2.0.79", ] [[package]] -name = "phf_generator" -version = "0.11.2" +name = "paste" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" -dependencies = [ - "phf_shared 0.11.2", - "rand 0.8.5", -] +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] -name = "phf_macros" -version = "0.10.0" +name = "pbkdf2" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fdf3184dd560f160dd73922bea2d5cd6e8f064bf4b13110abd81b03697b4e0" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "phf_generator 0.10.0", - "phf_shared 0.10.0", - "proc-macro-hack", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", + "digest 0.10.7", ] [[package]] -name = "phf_macros" -version = "0.11.2" +name = "pem" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies 
= [ - "phf_generator 0.11.2", - "phf_shared 0.11.2", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "base64 0.22.1", + "serde", ] [[package]] -name = "phf_shared" -version = "0.10.0" +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pest" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ - "siphasher 0.3.11", + "memchr", + "thiserror", + "ucd-trie", ] [[package]] -name = "phf_shared" -version = "0.11.2" +name = "petgraph" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ - "siphasher 0.3.11", + "fixedbitset", + "indexmap 2.6.0", ] [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -6250,17 +4150,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs1" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" -dependencies = [ - "der 0.7.8", - "pkcs8 0.10.2", - "spki 0.7.3", -] - [[package]] name = "pkcs8" version = "0.9.0" @@ -6277,103 +4166,21 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.8", + "der 0.7.9", "spki 0.7.3", ] [[package]] name = "pkg-config" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" - -[[package]] -name = "platforms" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" - -[[package]] -name = "pmutil" -version = "0.6.1" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52a40bc70c2c58040d2d8b167ba9a5ff59fc9dab7ad44771cfde3dcfde7a09c6" -dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", -] - -[[package]] -name = "polyval" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "opaque-debug", - "universal-hash", -] +checksum = 
"953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "portable-atomic" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" - -[[package]] -name = "portpicker" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be97d76faf1bfab666e1375477b23fde79eccf0276e9b63b92a39d676a889ba9" -dependencies = [ - "rand 0.8.5", -] - -[[package]] -name = "postgres-native-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d442770e2b1e244bb5eb03b31c79b65bb2568f413b899eaba850fa945a65954" -dependencies = [ - "futures 0.3.30", - "native-tls", - "tokio 1.36.0", - "tokio-native-tls", - "tokio-postgres", -] - -[[package]] -name = "postgres-protocol" -version = "0.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" -dependencies = [ - "base64 0.21.7", - "byteorder", - "bytes 1.5.0", - "fallible-iterator", - "hmac", - "md-5", - "memchr", - "rand 0.8.5", - "sha2", - "stringprep", -] - -[[package]] -name = "postgres-types" -version = "0.2.6" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d2234cdee9408b523530a9b6d2d6b373d1db34f6a8e51dc03ded1828d7fb67c" -dependencies = [ - "bytes 1.5.0", - "fallible-iterator", - "postgres-protocol", - "serde", - "serde_json", -] +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -6383,57 +4190,21 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "predicates" -version = 
"2.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" -dependencies = [ - "difflib", - "float-cmp", - "itertools", - "normalize-line-endings", - "predicates-core", - "regex", -] - -[[package]] -name = "predicates-core" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" - -[[package]] -name = "predicates-tree" -version = "1.0.9" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "predicates-core", - "termtree", + "zerocopy", ] [[package]] name = "prettyplease" -version = "0.1.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" -dependencies = [ - "proc-macro2 1.0.79", - "syn 1.0.109", -] - -[[package]] -name = "primeorder" -version = "0.13.6" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ - "elliptic-curve 0.13.8", + "proc-macro2", + "syn 2.0.79", ] [[package]] @@ -6451,22 +4222,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit 0.19.15", -] - -[[package]] -name = "proc-macro-crate" -version = "2.0.2" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_datetime", - "toml_edit 0.20.2", + "toml_edit", ] [[package]] @@ -6476,9 +4236,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", + "proc-macro2", + "quote", "version_check", ] @@ -6488,111 +4247,153 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", + "proc-macro2", + "quote", "version_check", ] [[package]] -name = "proc-macro-hack" -version = "0.5.20+deprecated" +name = "proc-macro-error-attr2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] [[package]] -name = "proc-macro2" -version = "0.4.30" +name = "proc-macro-error2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" dependencies = [ - "unicode-xid 0.1.0", + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", 
] +[[package]] +name = "proptest" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.6.0", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha", + "rand_xorshift", + "regex-syntax 0.8.5", + "rusty-fork", + "tempfile", + "unarray", +] + [[package]] name = "prost" -version = "0.11.9" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies = [ - "bytes 1.5.0", + "bytes", "prost-derive", ] [[package]] name = "prost-build" -version = "0.11.9" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" +checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ - "bytes 1.5.0", + "bytes", "heck", - "itertools", - "lazy_static", + "itertools 0.13.0", "log", "multimap", + "once_cell", "petgraph", "prettyplease", "prost", "prost-types", "regex", - "syn 1.0.109", + "syn 2.0.79", "tempfile", - "which", ] [[package]] name = "prost-derive" -version = "0.11.9" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" +dependencies = [ + "anyhow", + "itertools 0.13.0", + "proc-macro2", + "quote", + "syn 2.0.79", +] + +[[package]] +name = "prost-types" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" +dependencies = [ + "prost", +] + +[[package]] +name = "ptr_meta" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" dependencies = [ - "anyhow", - "itertools", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", + "ptr_meta_derive 0.1.4", ] [[package]] -name = "prost-types" -version = "0.11.9" +name = "ptr_meta" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" +checksum = "fe9e76f66d3f9606f44e45598d155cb13ecf09f4a28199e48daf8c8fc937ea90" dependencies = [ - "prost", + "ptr_meta_derive 0.3.0", ] [[package]] -name = "psm" -version = "0.1.21" +name = "ptr_meta_derive" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "cc", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "quanta" -version = "0.12.2" +name = "ptr_meta_derive" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" +checksum = "ca414edb151b4c8d125c12566ab0d74dc9cdba36fb80eb7b848c15f495fd32d1" dependencies = [ - "crossbeam-utils 0.8.19", - "libc", - "once_cell", - "raw-cpuid", - "wasi 0.11.0+wasi-snapshot-preview1", - "web-sys", - "winapi 0.3.9", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] @@ -6602,43 +4403,60 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] -name = "quickcheck" -version = "1.0.3" +name = "quinn" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +checksum = 
"8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ - "env_logger", - "log", - "rand 0.8.5", + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls 0.23.14", + "socket2", + "thiserror", + "tokio", + "tracing", ] [[package]] -name = "quickcheck_macros" -version = "1.0.0" +name = "quinn-proto" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", + "bytes", + "rand 0.8.5", + "ring", + "rustc-hash", + "rustls 0.23.14", + "slab", + "thiserror", + "tinyvec", + "tracing", ] [[package]] -name = "quote" -version = "0.6.13" +name = "quinn-udp" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ - "proc-macro2 0.4.30", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.59.0", ] [[package]] name = "quote" -version = "1.0.35" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ - "proc-macro2 1.0.79", + "proc-macro2", ] [[package]] @@ -6648,29 +4466,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" [[package]] -name = "rand" -version = "0.4.6" +name = "rancor" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +checksum 
= "caf5f7161924b9d1cea0e4cabc97c372cea92b5f927fc13c6bca67157a0ad947" dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi 0.3.9", + "ptr_meta 0.3.0", ] [[package]] name = "rand" -version = "0.7.3" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" dependencies = [ - "getrandom 0.1.16", + "fuchsia-cprng", "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", + "rand_core 0.3.1", + "rdrand", + "winapi", ] [[package]] @@ -6680,18 +4494,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", + "rand_chacha", "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "serde", ] [[package]] @@ -6719,40 +4524,31 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.12", + "getrandom", ] [[package]] -name = "rand_hc" -version = "0.2.0" +name = "rand_xorshift" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.5.1", + "rand_core 0.6.4", ] [[package]] -name = "raw-cpuid" -version = "11.0.1" +name = "rand_xoshiro" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1" +checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" dependencies = [ - "bitflags 2.4.2", + "rand_core 0.6.4", ] [[package]] @@ -6764,61 +4560,36 @@ dependencies = [ "rand_core 0.3.1", ] -[[package]] -name = "redis" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c580d9cbbe1d1b479e8d67cf9daf6a62c957e6846048408b80b43ac3f6af84cd" -dependencies = [ - "combine", - "itoa", - "percent-encoding", - "rustls", - "rustls-native-certs", - "rustls-pemfile", - "rustls-webpki", - "ryu", - "sha1_smol", - "socket2 0.4.10", - "url", -] - -[[package]] -name = "redox_syscall" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 
0.2.12", + "getrandom", "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.10.3" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.6", - "regex-syntax 0.8.2", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -6832,20 +4603,20 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.5", ] [[package]] name = "regex-lite" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b661b2f27137bdbc16f00eda72866a92bb28af1753ffbd56744fb6e2e9cd8e" +checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" [[package]] name = "regex-syntax" @@ -6855,15 +4626,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" - -[[package]] -name = "regex-syntax" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "remove_dir_all" @@ -6871,26 +4636,43 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.9", + "winapi", +] + +[[package]] +name = "rend" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" +dependencies = [ + "bytecheck 0.6.12", +] + +[[package]] +name = "rend" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31c1f1959e4db12c985c0283656be0925f1539549db1e47c4bd0b8b599e1ef7" +dependencies = [ + "bytecheck 0.8.0", ] [[package]] name = "reqwest" -version = "0.11.26" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bf93c4af7a8bb7d879d51cebe797356ff10ae8516ace542b5182d9dcac10b2" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ - "async-compression", "base64 0.21.7", - "bytes 1.5.0", + "bytes", "encoding_rs", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.28", - "hyper-rustls", + "hyper 0.14.30", + "hyper-rustls 0.24.2", "ipnet", "js-sys", "log", @@ -6898,43 +4680,69 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls", - "rustls-pemfile", + "rustls 0.21.12", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", "system-configuration", - "tokio 1.36.0", - "tokio-rustls", - "tokio-socks", - "tokio-util", + "tokio", + "tokio-rustls 0.24.1", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-streams", "web-sys", - "webpki-roots", + "webpki-roots 0.25.4", "winreg", ] [[package]] -name = "resolv-conf" -version = "0.7.0" +name = "reqwest" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +checksum = 
"f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ - "hostname", - "quick-error", + "base64 0.22.1", + "bytes", + "futures-core", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-rustls 0.27.3", + "hyper-tls", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls 0.23.14", + "rustls-pemfile 2.2.0", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tokio-native-tls", + "tokio-rustls 0.26.0", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots 0.26.6", + "windows-registry", ] -[[package]] -name = "retain_mut" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0" - [[package]] name = "rfc6979" version = "0.3.1" @@ -6958,41 +4766,76 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.20" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", + "getrandom", "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi 0.3.9", + "spin", + "untrusted", + "windows-sys 0.52.0", ] [[package]] -name = "ring" -version = "0.17.8" +name = "rkyv" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" dependencies = [ - "cc", - "cfg-if 1.0.0", - "getrandom 0.2.12", - "libc", - "spin 0.9.8", - "untrusted 0.9.0", - "windows-sys 0.52.0", + "bitvec", + 
"bytecheck 0.6.12", + "bytes", + "hashbrown 0.12.3", + "ptr_meta 0.1.4", + "rend 0.4.2", + "rkyv_derive 0.7.45", + "seahash", + "tinyvec", + "uuid 1.10.0", ] [[package]] -name = "ripemd" -version = "0.1.3" +name = "rkyv" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +checksum = "395027076c569819ea6035ee62e664f5e03d74e281744f55261dd1afd939212b" dependencies = [ - "digest 0.10.7", + "bytecheck 0.8.0", + "bytes", + "hashbrown 0.14.5", + "indexmap 2.6.0", + "munge", + "ptr_meta 0.3.0", + "rancor", + "rend 0.5.1", + "rkyv_derive 0.8.8", + "tinyvec", + "uuid 1.10.0", +] + +[[package]] +name = "rkyv_derive" +version = "0.7.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "rkyv_derive" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cb82b74b4810f07e460852c32f522e979787691b0b7b7439fe473e49d49b2f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] @@ -7001,55 +4844,83 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ - "bytes 1.5.0", + "bytes", "rustc-hex", ] [[package]] -name = "rsa" -version = "0.9.6" +name = "roaring" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" +checksum = "8f4b84ba6e838ceb47b41de5194a60244fac43d9fe03b71dbe8c5a201081d6d1" dependencies = [ - "const-oid", - "digest 0.10.7", - "num-bigint-dig", - "num-integer", + "bytemuck", + "byteorder", +] + +[[package]] +name = "rtrb" +version = "0.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3f94e84c073f3b85d4012b44722fa8842b9986d741590d4f2636ad0a5b14143" + +[[package]] +name = "ruint" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint", "num-traits", - "pkcs1", - "pkcs8 0.10.2", - "rand_core 0.6.4", - "signature 2.2.0", - "spki 0.7.3", - "subtle", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rlp", + "ruint-macro", + "serde", + "valuable", "zeroize", ] [[package]] -name = "rusqlite" -version = "0.29.0" +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + +[[package]] +name = "rust_decimal" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" +checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555" dependencies = [ - "bitflags 2.4.2", - "fallible-iterator", - "fallible-streaming-iterator", - "hashlink", - "libsqlite3-sys", - "smallvec 1.13.1", + "arrayvec", + "borsh", + "bytes", + "num-traits", + "rand 0.8.5", + "rkyv 0.7.45", + "serde", + "serde_json", ] [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" -version = "1.1.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +checksum = 
"583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" [[package]] name = "rustc-hex" @@ -7059,74 +4930,96 @@ checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" [[package]] name = "rustc_version" -version = "0.2.3" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" dependencies = [ - "semver 0.9.0", + "semver 0.11.0", ] [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.22", + "semver 1.0.23", ] [[package]] -name = "rustc_version_runtime" -version = "0.2.1" +name = "rustix" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d31b7153270ebf48bf91c65ae5b0c00e749c4cfad505f66530ac74950249582f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ - "rustc_version 0.2.3", - "semver 0.9.0", + "bitflags 2.6.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", ] [[package]] -name = "rusticata-macros" -version = "4.1.0" +name = "rustls" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ - "nom 7.1.3", + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", ] [[package]] -name = "rustix" -version = "0.38.31" +name = "rustls" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ - "bitflags 2.4.2", - "errno 0.3.8", - "libc", - "linux-raw-sys", - "windows-sys 0.52.0", + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki 0.102.8", + "subtle", + "zeroize", ] [[package]] -name = "rustls" -version = "0.21.10" +name = "rustls-native-certs" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ - "log", - "ring 0.17.8", - "rustls-webpki", - "sct", + "openssl-probe", + "rustls-pemfile 1.0.4", + "schannel", + "security-framework", ] [[package]] name = "rustls-native-certs" -version = "0.6.3" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.2.0", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 2.2.0", + "rustls-pki-types", "schannel", "security-framework", ] @@ -7141,15 +5034,19 @@ dependencies = [ ] [[package]] -name = "rustls-tokio-stream" -version = "0.2.7" +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" 
+version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3abd2fa2e122bbf891a7333bf2091d8130367d8c381913821b24389208a3db45" -dependencies = [ - "futures 0.3.30", - "rustls", - "tokio 1.36.0", -] +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] name = "rustls-webpki" @@ -7157,37 +5054,44 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] -name = "rustversion" -version = "1.0.14" +name = "rustls-webpki" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] [[package]] -name = "ryu" +name = "rustversion" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] -name = "ryu-js" -version = "1.0.1" +name = "rusty-fork" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad97d4ce1560a5e27cec89519dc8300d1aa6035b099821261c651486a19e44d5" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] [[package]] -name = "saffron" -version = "0.1.0" +name = "ryu" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03fb9a628596fc7590eb7edbf7b0613287be78df107f5f97b118aad59fb2eea9" -dependencies = [ - "chrono", - "nom 5.1.3", -] +checksum = 
"f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "salsa20" @@ -7198,54 +5102,15 @@ dependencies = [ "cipher", ] -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - [[package]] name = "schannel" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "schemars" -version = "0.8.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" -dependencies = [ - "dyn-clone", - "schemars_derive", - "serde", - "serde_json", -] - -[[package]] -name = "schemars_derive" -version = "0.8.16" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "serde_derive_internals", - "syn 1.0.109", + "windows-sys 0.59.0", ] -[[package]] -name = "scoped-tls" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" - [[package]] name = "scopeguard" version = "1.2.0" @@ -7259,19 +5124,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" dependencies = [ "hmac", - "pbkdf2 0.11.0", - "salsa20", - "sha2", -] - -[[package]] -name = "scrypt" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0516a385866c09368f0b5bcd1caff3366aace790fcd46e2bb032697bb172fd1f" -dependencies = [ - "password-hash", - "pbkdf2 0.12.2", + "pbkdf2", "salsa20", "sha2", ] @@ -7282,10 +5135,16 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", + "ring", + "untrusted", ] +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + [[package]] name = "sec1" version = "0.3.0" @@ -7294,7 +5153,7 @@ checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct 0.1.1", "der 0.6.1", - "generic-array 0.14.7", + "generic-array", "pkcs8 0.9.0", "subtle", "zeroize", @@ -7307,49 +5166,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", - "der 0.7.8", - "generic-array 0.14.7", + "der 0.7.9", + "generic-array", "pkcs8 0.10.2", "subtle", "zeroize", ] -[[package]] -name = "secp256k1" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" -dependencies = [ - "rand 0.8.5", - "secp256k1-sys", -] - -[[package]] -name = "secp256k1-sys" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" -dependencies = [ - "cc", -] - -[[package]] -name = "secrecy" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" -dependencies = [ - "serde", - "zeroize", -] - [[package]] name = "security-framework" -version = "2.9.2" 
+version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -7358,9 +5188,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -7368,89 +5198,56 @@ dependencies = [ [[package]] name = "semver" -version = "0.9.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ "semver-parser", ] [[package]] name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - -[[package]] -name = "seq-macro" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" - -[[package]] -name = "serde" -version = "1.0.197" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" 
-dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde-value" -version = "0.7.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" dependencies = [ - "ordered-float", - "serde", + "pest", ] [[package]] -name = "serde_bytes" -version = "0.11.14" +name = "serde" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ - "serde", + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.197" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" -dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", -] - -[[package]] -name = "serde_derive_internals" -version = "0.26.0" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ - "indexmap 2.2.5", "itoa", + "memchr", "ryu", "serde", ] @@ -7467,24 +5264,23 @@ dependencies = [ ] [[package]] -name = "serde_path_to_error" -version = "0.1.16" +name = "serde_repr" +version = "0.1.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ - "itoa", - "serde", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] -name = "serde_qs" -version = "0.8.5" +name = "serde_spanned" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7715380eec75f029a4ef7de39a9200e0a63823176b759d055b613f5a87df6a6" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ - "percent-encoding", "serde", - "thiserror", ] [[package]] @@ -7499,110 +5295,34 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_v8" -version = "0.133.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8ea791b0c83c4f4c6684b6d2e04aa8f936f3abbdff613624553fe5b80ea7c0c" -dependencies = [ - "bytes 1.5.0", - "derive_more", - "num-bigint", - "serde", - "serde_bytes", - "smallvec 1.13.1", - "thiserror", - "v8", -] - -[[package]] -name = "serde_v8" -version = "0.134.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b43265d540cbeb168d730b5df2069f8dfb9de318201f4e8f4f956876266432af" -dependencies = [ - "bytes 1.5.0", - "derive_more", - "num-bigint", - "serde", - "serde_bytes", - "smallvec 1.13.1", - "thiserror", - "v8", -] - -[[package]] -name = "serde_with" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" -dependencies = [ - "serde", - "serde_with_macros 1.5.2", -] - [[package]] name = "serde_with" -version = "2.3.3" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies 
= [ - "base64 0.13.1", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", + "indexmap 2.6.0", "serde", + "serde_derive", "serde_json", - "serde_with_macros 2.3.3", + "serde_with_macros", "time", ] [[package]] name = "serde_with_macros" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" -dependencies = [ - "darling 0.13.4", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", -] - -[[package]] -name = "serde_with_macros" -version = "2.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" -dependencies = [ - "darling 0.20.8", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", -] - -[[package]] -name = "serde_yaml" -version = "0.9.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" -dependencies = [ - "indexmap 2.2.5", - "itoa", - "ryu", - "serde", - "unsafe-libyaml", -] - -[[package]] -name = "sha-1" -version = "0.10.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", + "darling 0.20.10", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] @@ -7611,24 +5331,18 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] -[[package]] -name = "sha1_smol" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" - [[package]] name = "sha2" version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -7643,6 +5357,16 @@ dependencies = [ "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -7660,9 +5384,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -7684,198 +5408,59 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", -] - -[[package]] -name = "simd-abstraction" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cadb29c57caadc51ff8346233b5cec1d240b68ce55cf1afc764818791876987" -dependencies = [ - "outref 0.1.0", -] - -[[package]] -name = "simdutf8" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" - -[[package]] -name = "similar" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32fea41aca09ee824cc9724996433064c89f7777e60762749a4170a14abbfa21" -dependencies = [ - 
"bstr", - "unicode-segmentation", -] - -[[package]] -name = "similar-asserts" -version = "1.4.2" -source = "git+https://github.com/bigherc18/similar-asserts.git#63eef1b71d2d21648c3b8c992c616a7c4b74d747" -dependencies = [ - "console", - "serde", - "similar", -] - -[[package]] -name = "simple_asn1" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" -dependencies = [ - "num-bigint", - "num-traits", - "thiserror", - "time", -] - -[[package]] -name = "siphasher" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" - -[[package]] -name = "siphasher" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54ac45299ccbd390721be55b412d41931911f654fa99e2cb8bfb57184b2061fe" - -[[package]] -name = "slab" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] - -[[package]] -name = "smallvec" -version = "0.6.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = [ - "maybe-uninit", -] - -[[package]] -name = "smallvec" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" - -[[package]] -name = "smartstring" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb72c633efbaa2dd666986505016c32c3044395ceaf881518399d2f4127ee29" -dependencies = [ - "autocfg", - "static_assertions", - "version_check", -] - -[[package]] -name = "snafu" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6" -dependencies = [ - "backtrace", - "doc-comment", - "snafu-derive", + "rand_core 0.6.4", ] [[package]] -name = "snafu-derive" -version = "0.7.5" +name = "simdutf8" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf" -dependencies = [ - "heck", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", -] +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] -name = "socket2" -version = "0.4.10" +name = "simple_asn1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "libc", - "winapi 0.3.9", + "num-bigint", + "num-traits", + "thiserror", + "time", ] [[package]] -name = "socket2" -version = "0.5.6" +name = "slab" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ - "libc", - "windows-sys 0.52.0", + "autocfg", ] [[package]] -name = "sourcemap" -version = "6.4.1" +name = "smallvec" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4cbf65ca7dc576cf50e21f8d0712d96d4fcfd797389744b7b222a85cdf5bd90" -dependencies = [ - "data-encoding", - "debugid", - "if_chain", - "rustc_version 0.2.3", - "serde", - "serde_json", - "unicode-id", - "url", -] +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] -name = "sourcemap" -version = "7.1.1" +name = "socket2" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e7768edd06c02535e0d50653968f46e1e0d3aa54742190d35dd9466f59de9c71" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ - "base64-simd 0.7.0", - "data-encoding", - "debugid", - "if_chain", - "rustc_version 0.2.3", - "serde", - "serde_json", - "unicode-id-start", - "url", + "libc", + "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - -[[package]] -name = "spinning_top" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" dependencies = [ - "lock_api 0.4.11", + "lock_api", ] [[package]] @@ -7895,32 +5480,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.8", -] - -[[package]] -name = "stacker" -version = "0.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c886bd4480155fd3ef527d45e9ac8dd7118a898a46530b7b94c3e21866259fce" -dependencies = [ - "cc", - "cfg-if 1.0.0", - "libc", - "psm", - "winapi 0.3.9", + "der 0.7.9", ] [[package]] name = "starknet" -version = "0.10.0" -source = "git+https://github.com/xJonathanLEI/starknet-rs?rev=7153d0e42#7153d0e42112f8c1348029557aa6cf2b881a5c84" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f0c9ac3809cc7630784e8c8565fa3013af819d83c97aa2720d566016d439011" dependencies = [ "starknet-accounts", "starknet-contract", "starknet-core", "starknet-crypto", - "starknet-ff", "starknet-macros", "starknet-providers", 
"starknet-signers", @@ -7928,12 +5500,14 @@ dependencies = [ [[package]] name = "starknet-accounts" -version = "0.9.0" -source = "git+https://github.com/xJonathanLEI/starknet-rs?rev=7153d0e42#7153d0e42112f8c1348029557aa6cf2b881a5c84" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ee27ded58ade61da410fccafd57ed5429b0e79a9d62a4ae8b65818cb9d6f400" dependencies = [ "async-trait", "auto_impl", "starknet-core", + "starknet-crypto", "starknet-providers", "starknet-signers", "thiserror", @@ -7941,12 +5515,13 @@ dependencies = [ [[package]] name = "starknet-contract" -version = "0.9.0" -source = "git+https://github.com/xJonathanLEI/starknet-rs?rev=7153d0e42#7153d0e42112f8c1348029557aa6cf2b881a5c84" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd6ee5762d24c4f06ab7e9406550925df406712e73719bd2de905c879c674a87" dependencies = [ "serde", "serde_json", - "serde_with 2.3.3", + "serde_with", "starknet-accounts", "starknet-core", "starknet-providers", @@ -7955,25 +5530,28 @@ dependencies = [ [[package]] name = "starknet-core" -version = "0.10.0" -source = "git+https://github.com/xJonathanLEI/starknet-rs?rev=7153d0e42#7153d0e42112f8c1348029557aa6cf2b881a5c84" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538240cbe6663c673fe77465f294da707080f39678dd7066761554899e46100" dependencies = [ "base64 0.21.7", + "crypto-bigint 0.5.5", "flate2", "hex", "serde", "serde_json", "serde_json_pythonic", - "serde_with 2.3.3", + "serde_with", "sha3", "starknet-crypto", - "starknet-ff", + "starknet-types-core", ] [[package]] name = "starknet-crypto" -version = "0.6.2" -source = "git+https://github.com/xJonathanLEI/starknet-rs?rev=7153d0e42#7153d0e42112f8c1348029557aa6cf2b881a5c84" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a5064173a8e8d2675e67744fd07f310de44573924b6b7af225a6bdd8102913" 
dependencies = [ "crypto-bigint 0.5.5", "hex", @@ -7983,67 +5561,46 @@ dependencies = [ "num-traits", "rfc6979 0.4.0", "sha2", - "starknet-crypto-codegen", "starknet-curve", - "starknet-ff", + "starknet-types-core", "zeroize", ] -[[package]] -name = "starknet-crypto-codegen" -version = "0.3.3" -source = "git+https://github.com/xJonathanLEI/starknet-rs?rev=7153d0e42#7153d0e42112f8c1348029557aa6cf2b881a5c84" -dependencies = [ - "starknet-curve", - "starknet-ff", - "syn 2.0.52", -] - [[package]] name = "starknet-curve" -version = "0.4.2" -source = "git+https://github.com/xJonathanLEI/starknet-rs?rev=7153d0e42#7153d0e42112f8c1348029557aa6cf2b881a5c84" -dependencies = [ - "starknet-ff", -] - -[[package]] -name = "starknet-ff" -version = "0.3.7" -source = "git+https://github.com/xJonathanLEI/starknet-rs?rev=7153d0e42#7153d0e42112f8c1348029557aa6cf2b881a5c84" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcde6bd74269b8161948190ace6cf069ef20ac6e79cd2ba09b320efa7500b6de" dependencies = [ - "ark-ff", - "bigdecimal", - "crypto-bigint 0.5.5", - "getrandom 0.2.12", - "hex", - "num-bigint", - "serde", + "starknet-types-core", ] [[package]] name = "starknet-macros" -version = "0.1.7" -source = "git+https://github.com/xJonathanLEI/starknet-rs?rev=7153d0e42#7153d0e42112f8c1348029557aa6cf2b881a5c84" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8986a940af916fc0a034f4e42c6ba76d94f1e97216d75447693dfd7aefaf3ef2" dependencies = [ "starknet-core", - "syn 2.0.52", + "syn 2.0.79", ] [[package]] name = "starknet-providers" -version = "0.10.0" -source = "git+https://github.com/xJonathanLEI/starknet-rs?rev=7153d0e42#7153d0e42112f8c1348029557aa6cf2b881a5c84" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60e8e69ba7a36dea2d28333be82b4011f8784333d3ae5618482b6587c1ffb66c" dependencies = [ "async-trait", "auto_impl", "ethereum-types", "flate2", 
+ "getrandom", "log", - "reqwest", + "reqwest 0.11.27", "serde", "serde_json", - "serde_with 2.3.3", + "serde_with", "starknet-core", "thiserror", "url", @@ -8051,13 +5608,15 @@ dependencies = [ [[package]] name = "starknet-signers" -version = "0.8.0" -source = "git+https://github.com/xJonathanLEI/starknet-rs?rev=7153d0e42#7153d0e42112f8c1348029557aa6cf2b881a5c84" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70b9e01b61ae51d722e2b100d6ef913c5a2e70d1ea672733d385f7296d6055ef" dependencies = [ "async-trait", "auto_impl", "crypto-bigint 0.5.5", "eth-keystore", + "getrandom", "rand 0.8.5", "starknet-core", "starknet-crypto", @@ -8065,445 +5624,132 @@ dependencies = [ ] [[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "string_enum" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fa4d4f81d7c05b9161f8de839975d3326328b8ba2831164b465524cc2f55252" -dependencies = [ - "pmutil", - "proc-macro2 1.0.79", - "quote 1.0.35", - "swc_macros_common", - "syn 2.0.52", -] - -[[package]] -name = "stringprep" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" -dependencies = [ - "finl_unicode", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "strsim" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" - -[[package]] -name = "strum" -version = "0.25.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.25.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" -dependencies = [ - "heck", - "proc-macro2 1.0.79", - "quote 1.0.35", - "rustversion", - "syn 2.0.52", -] - -[[package]] -name = "subtle" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" - -[[package]] -name = "swc_atoms" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a9e1b6d97f27b6abe5571f8fe3bdbd2fa987299fc2126450c7cde6214896ef" -dependencies = [ - "hstr", - "once_cell", - "rustc-hash", - "serde", -] - -[[package]] -name = "swc_common" -version = "0.33.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ccb656cd57c93614e4e8b33a60e75ca095383565c1a8d2bbe6a1103942831e0" -dependencies = [ - "ast_node", - "better_scoped_tls", - "cfg-if 1.0.0", - "either", - "from_variant", - "new_debug_unreachable", - "num-bigint", - "once_cell", - "rustc-hash", - "serde", - "siphasher 0.3.11", - "sourcemap 6.4.1", - "swc_atoms", - "swc_eq_ignore_macros", - "swc_visit", - "tracing", - "unicode-width", - "url", -] - -[[package]] -name = "swc_config" +name = "starknet-types-core" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba1c7a40d38f9dd4e9a046975d3faf95af42937b34b2b963be4d8f01239584b" -dependencies = [ - "indexmap 1.9.3", - "serde", - "serde_json", - "swc_config_macro", -] - -[[package]] -name = "swc_config_macro" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e5b5aaca9a0082be4515f0fbbecc191bf5829cd25b5b9c0a2810f6a2bb0d6829" -dependencies = [ - "pmutil", - "proc-macro2 1.0.79", - "quote 1.0.35", - "swc_macros_common", - "syn 2.0.52", -] - -[[package]] -name = "swc_ecma_ast" -version = "0.110.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c3d416121da2d56bcbd1b1623725a68890af4552fef0c6d1e4bfa92776ccd6a" -dependencies = [ - "bitflags 2.4.2", - "is-macro", - "num-bigint", - "phf 0.11.2", - "scoped-tls", - "serde", - "string_enum", - "swc_atoms", - "swc_common", - "unicode-id", -] - -[[package]] -name = "swc_ecma_codegen" -version = "0.146.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7b37ef40385cc2e294ece3d42048dcda6392838724dd5f02ff8da3fa105271" -dependencies = [ - "memchr", - "num-bigint", - "once_cell", - "rustc-hash", - "serde", - "sourcemap 6.4.1", - "swc_atoms", - "swc_common", - "swc_ecma_ast", - "swc_ecma_codegen_macros", - "tracing", -] - -[[package]] -name = "swc_ecma_codegen_macros" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcdff076dccca6cc6a0e0b2a2c8acfb066014382bc6df98ec99e755484814384" -dependencies = [ - "pmutil", - "proc-macro2 1.0.79", - "quote 1.0.35", - "swc_macros_common", - "syn 2.0.52", -] - -[[package]] -name = "swc_ecma_loader" -version = "0.45.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31cf7549feec3698d0110a0a71ae547f31ae272dc92db3285ce126d6dcbdadf3" -dependencies = [ - "anyhow", - "pathdiff", - "serde", - "swc_common", - "tracing", -] - -[[package]] -name = "swc_ecma_parser" -version = "0.141.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9590deff1b29aafbff8901b9d38d00211393f6b17b5cab878562db89a8966d88" +checksum = "fa1b9e01ccb217ab6d475c5cda05dbb22c30029f7bb52b192a010a00d77a3d74" dependencies = [ - "either", - "new_debug_unreachable", + "lambdaworks-crypto", + "lambdaworks-math", "num-bigint", 
+ "num-integer", "num-traits", - "phf 0.11.2", - "serde", - "smallvec 1.13.1", - "smartstring", - "stacker", - "swc_atoms", - "swc_common", - "swc_ecma_ast", - "tracing", - "typed-arena", -] - -[[package]] -name = "swc_ecma_transforms_base" -version = "0.134.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d74ca42a400257d8563624122813c1849c3d87e7abe3b9b2ed7514c76f64ad2f" -dependencies = [ - "better_scoped_tls", - "bitflags 2.4.2", - "indexmap 1.9.3", - "once_cell", - "phf 0.11.2", - "rustc-hash", "serde", - "smallvec 1.13.1", - "swc_atoms", - "swc_common", - "swc_ecma_ast", - "swc_ecma_parser", - "swc_ecma_utils", - "swc_ecma_visit", - "tracing", -] - -[[package]] -name = "swc_ecma_transforms_classes" -version = "0.123.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e68880cf7d65b93e0446b3ee079f33d94e0eddac922f75b736a6ea7669517c0" -dependencies = [ - "swc_atoms", - "swc_common", - "swc_ecma_ast", - "swc_ecma_transforms_base", - "swc_ecma_utils", - "swc_ecma_visit", -] - -[[package]] -name = "swc_ecma_transforms_macros" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8188eab297da773836ef5cf2af03ee5cca7a563e1be4b146f8141452c28cc690" -dependencies = [ - "pmutil", - "proc-macro2 1.0.79", - "quote 1.0.35", - "swc_macros_common", - "syn 2.0.52", ] [[package]] -name = "swc_ecma_transforms_proposal" -version = "0.168.52" +name = "static_assertions" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17e1f409e026be953fabb327923ebc5fdc7c664bcac036b76107834798640ed" -dependencies = [ - "either", - "rustc-hash", - "serde", - "smallvec 1.13.1", - "swc_atoms", - "swc_common", - "swc_ecma_ast", - "swc_ecma_transforms_base", - "swc_ecma_transforms_classes", - "swc_ecma_transforms_macros", - "swc_ecma_utils", - "swc_ecma_visit", -] +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" 
[[package]] -name = "swc_ecma_transforms_react" -version = "0.180.52" +name = "strsim" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa7f368a80f28eeaa0f529cff6fb5d7578ef10a60be25bfd2582cb3f8ff5c9e" -dependencies = [ - "base64 0.13.1", - "dashmap", - "indexmap 1.9.3", - "once_cell", - "serde", - "sha-1", - "string_enum", - "swc_atoms", - "swc_common", - "swc_config", - "swc_ecma_ast", - "swc_ecma_parser", - "swc_ecma_transforms_base", - "swc_ecma_transforms_macros", - "swc_ecma_utils", - "swc_ecma_visit", -] +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] -name = "swc_ecma_transforms_typescript" -version = "0.185.52" +name = "strsim" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa2950c85abb4d555e092503ad2fa4f6dec0ee36a719273fb7a7bb29ead9ab6" -dependencies = [ - "ryu-js", - "serde", - "swc_atoms", - "swc_common", - "swc_ecma_ast", - "swc_ecma_transforms_base", - "swc_ecma_transforms_react", - "swc_ecma_utils", - "swc_ecma_visit", -] +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] -name = "swc_ecma_utils" -version = "0.124.32" +name = "structmeta" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a4a0baf6cfa490666a9fe23a17490273f843d19ebc1d6ec89d64c3f8ccdb80" +checksum = "2e1575d8d40908d70f6fd05537266b90ae71b15dbbe7a8b7dffa2b759306d329" dependencies = [ - "indexmap 1.9.3", - "num_cpus", - "once_cell", - "rustc-hash", - "swc_atoms", - "swc_common", - "swc_ecma_ast", - "swc_ecma_visit", - "tracing", - "unicode-id", + "proc-macro2", + "quote", + "structmeta-derive", + "syn 2.0.79", ] [[package]] -name = "swc_ecma_visit" -version = "0.96.10" +name = "structmeta-derive" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba962f0becf83bab12a17365dface5a4f636c9e1743d479e292b96910a753743" 
+checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ - "num-bigint", - "swc_atoms", - "swc_common", - "swc_ecma_ast", - "swc_visit", - "tracing", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] -name = "swc_eq_ignore_macros" -version = "0.1.2" +name = "strum" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a95d367e228d52484c53336991fdcf47b6b553ef835d9159db4ba40efb0ee8" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" dependencies = [ - "pmutil", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "strum_macros", ] [[package]] -name = "swc_macros_common" -version = "0.3.8" +name = "strum_macros" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a273205ccb09b51fabe88c49f3b34c5a4631c4c00a16ae20e03111d6a42e832" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "pmutil", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.79", ] [[package]] -name = "swc_visit" -version = "0.5.7" +name = "subtle" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c337fbb2d191bf371173dea6a957f01899adb8f189c6c31b122a6cfc98fc3" -dependencies = [ - "either", - "swc_visit_macros", -] +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] -name = "swc_visit_macros" -version = "0.5.8" +name = "syn" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f322730fb82f3930a450ac24de8c98523af7d34ab8cb2f46bcb405839891a99" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "Inflector", - "pmutil", - "proc-macro2 1.0.79", - "quote 1.0.35", - "swc_macros_common", - "syn 2.0.52", + "proc-macro2", + "quote", + 
"unicode-ident", ] [[package]] name = "syn" -version = "0.15.44" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "unicode-xid 0.1.0", + "proc-macro2", + "quote", + "unicode-ident", ] [[package]] -name = "syn" -version = "1.0.109" +name = "syn-solidity" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +checksum = "f3a850d65181df41b83c6be01a7d91f5e9377c43d48faa5af7d95816f437f5a3" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "unicode-ident", + "paste", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] -name = "syn" -version = "2.0.52" +name = "syn_derive" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "unicode-ident", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] @@ -8513,15 +5759,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] -name = "synstructure" -version = "0.12.6" +name = "sync_wrapper" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", - "unicode-xid 0.2.4", + "futures-core", ] [[package]] @@ -8545,53 +5788,12 @@ 
dependencies = [ "libc", ] -[[package]] -name = "tabled" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe9c3632da101aba5131ed63f9eed38665f8b3c68703a6bb18124835c1a5d22" -dependencies = [ - "papergrid", - "tabled_derive", - "unicode-width", -] - -[[package]] -name = "tabled_derive" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99f688a08b54f4f02f0a3c382aefdb7884d3d69609f785bd253dc033243e3fe4" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", -] - -[[package]] -name = "take_mut" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" - [[package]] name = "tap" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" -[[package]] -name = "tar" -version = "0.4.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb" -dependencies = [ - "filetime", - "libc", - "xattr", -] - [[package]] name = "tempdir" version = "0.3.7" @@ -8604,74 +5806,63 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.1" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ - "cfg-if 1.0.0", - "fastrand 2.0.1", + "cfg-if", + "fastrand", + "once_cell", "rustix", - "windows-sys 0.52.0", -] - -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", + 
"windows-sys 0.59.0", ] -[[package]] -name = "termtree" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" - [[package]] name = "testcontainers" -version = "0.14.0" -source = "git+https://github.com/fracek/testcontainers-rs.git?rev=98a5557e#98a5557e1d55a98ce477890d8f508aa734898064" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ef8374cea2c164699681ecc39316c3e1d953831a7a5721e36c7736d974e15fa" dependencies = [ + "async-trait", + "bollard", "bollard-stubs", - "futures 0.3.30", - "hex", - "hmac", + "bytes", + "dirs", + "docker_credential", + "either", + "futures", "log", - "rand 0.8.5", + "memchr", + "parse-display", + "pin-project-lite", "serde", "serde_json", - "sha2", -] - -[[package]] -name = "text_lines" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fd5828de7deaa782e1dd713006ae96b3bee32d3279b79eb67ecf8072c059bcf" -dependencies = [ - "serde", + "serde_with", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "url", ] [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] @@ -8680,30 +5871,30 @@ version = "1.1.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] [[package]] -name = "thrift" -version = "0.17.0" +name = "threadpool" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e54bc85fc7faa8bc175c4bab5b92ba8d9a3ce893d0e9f42cc455c8ab16a9e09" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" dependencies = [ - "byteorder", - "integer-encoding", - "ordered-float", + "num_cpus", ] [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", + "libc", "num-conv", + "num_threads", "powerfmt", "serde", "time-core", @@ -8718,9 +5909,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -8737,9 +5928,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -8752,444 +5943,151 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "0.1.22" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "mio 0.6.23", - "num_cpus", - "tokio-codec", - "tokio-current-thread", - "tokio-executor", - "tokio-fs", - "tokio-io", - "tokio-reactor", - "tokio-sync", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "tokio-udp", - "tokio-uds", -] - -[[package]] -name = "tokio" -version = "1.36.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", - "bytes 1.5.0", + "bytes", "libc", - "mio 0.8.11", - "num_cpus", - "parking_lot 0.12.1", + "mio", + "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.6", + "socket2", "tokio-macros", - "windows-sys 0.48.0", -] - -[[package]] -name = "tokio-codec" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "tokio-io", -] - -[[package]] -name = "tokio-current-thread" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" -dependencies = [ - "futures 0.1.31", - "tokio-executor", -] - -[[package]] -name = "tokio-executor" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", -] - -[[package]] -name = "tokio-fs" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" -dependencies = [ - "futures 0.1.31", - "tokio-io", - "tokio-threadpool", -] - -[[package]] -name = "tokio-io" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "log", -] - -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio 1.36.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" -dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", -] - -[[package]] -name = "tokio-metrics" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eace09241d62c98b7eeb1107d4c5c64ca3bd7da92e8c218c153ab3a78f9be112" -dependencies = [ - "futures-util", - "pin-project-lite", - "tokio 1.36.0", - "tokio-stream", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio 1.36.0", -] - -[[package]] -name = "tokio-postgres" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d340244b32d920260ae7448cb72b6e238bddc3d4f7603394e7dd46ed8e48f5b8" -dependencies = [ - "async-trait", - "byteorder", - "bytes 1.5.0", - "fallible-iterator", - "futures-channel", - "futures-util", - "log", - "parking_lot 0.12.1", - "percent-encoding", - "phf 0.11.2", - "pin-project-lite", - "postgres-protocol", - 
"postgres-types", - "rand 0.8.5", - "socket2 0.5.6", - "tokio 1.36.0", - "tokio-util", - "whoami", -] - -[[package]] -name = "tokio-reactor" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "lazy_static", - "log", - "mio 0.6.23", - "num_cpus", - "parking_lot 0.9.0", - "slab", - "tokio-executor", - "tokio-io", - "tokio-sync", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls", - "tokio 1.36.0", -] - -[[package]] -name = "tokio-socks" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51165dfa029d2a65969413a6cc96f354b86b464498702f174a4efa13608fd8c0" -dependencies = [ - "either", - "futures-util", - "thiserror", - "tokio 1.36.0", -] - -[[package]] -name = "tokio-stream" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio 1.36.0", - "tokio-util", -] - -[[package]] -name = "tokio-sync" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" -dependencies = [ - "fnv", - "futures 0.1.31", -] - -[[package]] -name = "tokio-tcp" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "iovec", - "mio 0.6.23", - "tokio-io", - "tokio-reactor", -] - -[[package]] -name = "tokio-threadpool" -version = "0.1.18" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" -dependencies = [ - "crossbeam-deque", - "crossbeam-queue", - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "lazy_static", - "log", - "num_cpus", - "slab", - "tokio-executor", -] - -[[package]] -name = "tokio-timer" -version = "0.2.13" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "slab", - "tokio-executor", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] -name = "tokio-tungstenite" -version = "0.19.0" +name = "tokio-native-tls" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec509ac96e9a0c43427c74f003127d953a265737636129424288d27cb5c4b12c" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ - "futures-util", - "log", - "tokio 1.36.0", - "tungstenite 0.19.0", + "native-tls", + "tokio", ] [[package]] -name = "tokio-tungstenite" -version = "0.20.1" +name = "tokio-rustls" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "futures-util", - "log", - "tokio 1.36.0", - "tungstenite 0.20.1", + "rustls 0.21.12", + "tokio", ] [[package]] -name = "tokio-udp" -version = "0.1.6" +name = "tokio-rustls" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "bytes 0.4.12", - 
"futures 0.1.31", - "log", - "mio 0.6.23", - "tokio-codec", - "tokio-io", - "tokio-reactor", + "rustls 0.23.14", + "rustls-pki-types", + "tokio", ] [[package]] -name = "tokio-uds" -version = "0.2.7" +name = "tokio-stream" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "iovec", - "libc", - "log", - "mio 0.6.23", - "mio-uds", - "tokio-codec", - "tokio-io", - "tokio-reactor", + "futures-core", + "pin-project-lite", + "tokio", + "tokio-util", ] [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ - "bytes 1.5.0", + "bytes", "futures-core", - "futures-io", "futures-sink", "pin-project-lite", - "slab", - "tokio 1.36.0", - "tracing", + "tokio", ] [[package]] name = "toml" -version = "0.5.11" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", ] [[package]] name = "toml_datetime" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" - -[[package]] -name = "toml_edit" -version = "0.19.15" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +checksum = 
"0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ - "indexmap 2.2.5", - "toml_datetime", - "winnow", + "serde", ] [[package]] name = "toml_edit" -version = "0.20.2" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.6.0", + "serde", + "serde_spanned", "toml_datetime", "winnow", ] [[package]] name = "tonic" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" -dependencies = [ - "async-stream", - "async-trait", - "axum", - "base64 0.13.1", - "bytes 1.5.0", - "futures-core", - "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", - "hyper-timeout", - "percent-encoding", - "pin-project", - "prost", - "prost-derive", - "tokio 1.36.0", - "tokio-stream", - "tokio-util", - "tower", - "tower-layer", - "tower-service", - "tracing", - "tracing-futures", -] - -[[package]] -name = "tonic" -version = "0.9.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.7", - "bytes 1.5.0", - "futures-core", - "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", + "base64 0.22.1", + "bytes", + "h2 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", "hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", "prost", - "rustls-native-certs", - "rustls-pemfile", - "tokio 1.36.0", - "tokio-rustls", + "rustls-native-certs 0.8.0", + "rustls-pemfile 2.2.0", + 
"socket2", + "tokio", + "tokio-rustls 0.26.0", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -9197,54 +6095,42 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" -dependencies = [ - "prettyplease", - "proc-macro2 1.0.79", - "prost-build", - "quote 1.0.35", - "syn 1.0.109", -] - -[[package]] -name = "tonic-build" -version = "0.9.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ "prettyplease", - "proc-macro2 1.0.79", + "proc-macro2", "prost-build", - "quote 1.0.35", - "syn 1.0.109", + "prost-types", + "quote", + "syn 2.0.79", ] [[package]] name = "tonic-health" -version = "0.9.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080964d45894b90273d2b1dd755fdd114560db8636bb41cea615213c45043c4d" +checksum = "1eaf34ddb812120f5c601162d5429933c9b527d901ab0e7f930d3147e33a09b2" dependencies = [ "async-stream", "prost", - "tokio 1.36.0", + "tokio", "tokio-stream", - "tonic 0.9.2", + "tonic", ] [[package]] name = "tonic-reflection" -version = "0.9.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0543d7092032041fbeac1f2c84304537553421a11a623c2301b12ef0264862c7" +checksum = "878d81f52e7fcfd80026b7fdb6a9b578b3c3653ba987f87f0dce4b64043cba27" dependencies = [ "prost", "prost-types", - "tokio 1.36.0", + "tokio", "tokio-stream", - "tonic 0.9.2", + "tonic", ] [[package]] @@ -9260,7 +6146,7 @@ dependencies = [ "pin-project-lite", "rand 0.8.5", "slab", - "tokio 1.36.0", + "tokio", "tokio-util", "tower-layer", "tower-service", @@ -9268,37 +6154,30 @@ dependencies = [ ] 
[[package]] -name = "tower-http" -version = "0.4.4" +name = "tower" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" dependencies = [ - "base64 0.21.7", - "bitflags 2.4.2", - "bytes 1.5.0", "futures-core", "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "http-range-header", - "mime", "pin-project-lite", + "sync_wrapper 0.1.2", "tower-layer", "tower-service", - "tracing", ] [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -9306,7 +6185,6 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -9318,9 +6196,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] @@ -9333,30 +6211,6 @@ dependencies = [ "valuable", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "futures 0.3.30", - "futures-task", - "pin-project", - "tokio 0.1.22", - "tracing", -] - -[[package]] -name = "tracing-log" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" -dependencies = [ - "log", - "once_cell", - "tracing-core", -] - [[package]] name = "tracing-log" version = "0.2.0" @@ -9370,16 +6224,20 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.18.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21ebb87a95ea13271332df069020513ab70bdb5637ca42d6e492dc3bbbad48de" +checksum = "a9784ed4da7d921bc8df6963f8c80a0e4ce34ba6ba76668acadd3edbd985ff3b" dependencies = [ + "js-sys", "once_cell", "opentelemetry", + "opentelemetry_sdk", + "smallvec", "tracing", "tracing-core", - "tracing-log 0.1.4", + "tracing-log", "tracing-subscriber", + "web-time", ] [[package]] @@ -9399,204 +6257,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", - "nu-ansi-term", + "nu-ansi-term 0.46.0", "once_cell", "regex", "serde", "serde_json", "sharded-slab", - "smallvec 1.13.1", + "smallvec", "thread_local", "tracing", "tracing-core", - "tracing-log 0.2.0", + "tracing-log", "tracing-serde", ] -[[package]] -name = "tracing-tree" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ec6adcab41b1391b08a308cc6302b79f8095d1673f6947c2dc65ffb028b0b2d" -dependencies = [ - "nu-ansi-term", - "tracing-core", - "tracing-log 0.1.4", - "tracing-subscriber", -] - -[[package]] -name = "treediff" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d127780145176e2b5d16611cc25a900150e86e9fd79d3bde6ff3a37359c9cb5" 
-dependencies = [ - "serde_json", -] - -[[package]] -name = "trust-dns-proto" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c31f240f59877c3d4bb3b3ea0ec5a6a0cff07323580ff8c7a605cd7d08b255d" -dependencies = [ - "async-trait", - "cfg-if 1.0.0", - "data-encoding", - "enum-as-inner 0.4.0", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.2.3", - "ipnet", - "lazy_static", - "log", - "rand 0.8.5", - "smallvec 1.13.1", - "thiserror", - "tinyvec", - "tokio 1.36.0", - "url", -] - -[[package]] -name = "trust-dns-proto" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" -dependencies = [ - "async-trait", - "cfg-if 1.0.0", - "data-encoding", - "enum-as-inner 0.5.1", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.2.3", - "ipnet", - "lazy_static", - "rand 0.8.5", - "serde", - "smallvec 1.13.1", - "thiserror", - "tinyvec", - "tokio 1.36.0", - "tracing", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ba72c2ea84515690c9fcef4c6c660bb9df3036ed1051686de84605b74fd558" -dependencies = [ - "cfg-if 1.0.0", - "futures-util", - "ipconfig", - "lazy_static", - "log", - "lru-cache", - "parking_lot 0.12.1", - "resolv-conf", - "smallvec 1.13.1", - "thiserror", - "tokio 1.36.0", - "trust-dns-proto 0.21.2", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" -dependencies = [ - "cfg-if 1.0.0", - "futures-util", - "ipconfig", - "lazy_static", - "lru-cache", - "parking_lot 0.12.1", - "resolv-conf", - "serde", - "smallvec 1.13.1", - "thiserror", - "tokio 1.36.0", - "tracing", - "trust-dns-proto 0.22.0", -] - [[package]] name = "try-lock" 
version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" -[[package]] -name = "tungstenite" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15fba1a6d6bb030745759a9a2a588bfe8490fc8b4751a277db3a0be1c9ebbf67" -dependencies = [ - "byteorder", - "bytes 1.5.0", - "data-encoding", - "http 0.2.12", - "httparse", - "log", - "rand 0.8.5", - "sha1", - "thiserror", - "url", - "utf-8", -] - -[[package]] -name = "tungstenite" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" -dependencies = [ - "byteorder", - "bytes 1.5.0", - "data-encoding", - "http 0.2.12", - "httparse", - "log", - "rand 0.8.5", - "sha1", - "thiserror", - "url", - "utf-8", -] - [[package]] name = "twox-hash" version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "rand 0.8.5", "static_assertions", ] -[[package]] -name = "typed-arena" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a" - -[[package]] -name = "typed-builder" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" -dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 1.0.109", -] - [[package]] name = "typenum" version = "1.17.0" @@ -9604,57 +6295,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] -name = "uint" -version = "0.9.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unic-char-property" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8c57a407d9b6fa02b4795eb81c5b6652060a15a7903ea981f3d723e6c0be221" -dependencies = [ - "unic-char-range", -] - -[[package]] -name = "unic-char-range" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0398022d5f700414f6b899e10b8348231abf9173fa93144cbc1a43b9793c1fbc" - -[[package]] -name = "unic-common" -version = "0.9.0" +name = "ucd-trie" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d7ff825a6a654ee85a63e80f92f054f904f21e7d12da4e22f9834a4aaa35bc" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] -name = "unic-ucd-ident" -version = "0.9.0" +name = "uint" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e230a37c0381caa9219d67cf063aa3a375ffed5bf541a452db16e744bdab6987" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" dependencies = [ - "unic-char-property", - "unic-char-range", - "unic-ucd-version", + "byteorder", + "crunchy", + "hex", + "static_assertions", ] [[package]] -name = "unic-ucd-version" -version = "0.9.0" +name = "unarray" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96bd2f2237fe450fcd0a1d2f5f4e91711124f7857ba2e964247776ebeeb7b0c4" -dependencies = [ - "unic-common", -] +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" @@ -9667,82 +6329,36 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" - -[[package]] -name = "unicode-id" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1b6def86329695390197b82c1e244a54a131ceb66c996f2088a3876e2ae083f" - -[[package]] -name = "unicode-id-start" -version = "1.1.2" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8f73150333cb58412db36f2aca8f2875b013049705cc77b94ded70a1ab1f5da" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" - [[package]] name = "unicode-width" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" - -[[package]] -name = "unicode-xid" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" [[package]] name = "unicode-xid" -version = "0.2.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" - -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - -[[package]] -name = "unsafe-libyaml" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" - -[[package]] -name = "untrusted" -version = "0.7.1" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "untrusted" @@ -9752,12 +6368,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.4.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna", "percent-encoding", "serde", ] @@ -9768,25 +6384,6 @@ version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" -[[package]] -name = "urlpattern" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9bd5ff03aea02fa45b13a7980151fe45009af1980ba69f651ec367121a31609" -dependencies = [ - "derive_more", - "regex", - "serde", - "unic-ucd-ident", - "url", -] - -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - [[package]] name = "utf8-width" version = "0.1.7" @@ -9795,9 +6392,9 @@ checksum = "86bd8d4e895da8537e5315b8254664e6b769c4ff3db18321b297a1e7004392e3" [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" @@ -9805,31 +6402,15 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.12", + "getrandom", "serde", ] [[package]] name = "uuid" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" -dependencies = [ - "getrandom 0.2.12", - "serde", -] - -[[package]] -name = "v8" -version = "0.81.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75f5f378b9b54aff3b10da8170d26af4cfd217f644cf671badcd13af5db4beb" -dependencies = [ - "bitflags 1.3.2", - "fslock", - "once_cell", - "which", -] +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" [[package]] name = "valuable" @@ -9845,9 +6426,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "vsimd" @@ -9856,40 +6437,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" [[package]] -name = "vte" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5022b5fbf9407086c180e9557be968742d839e68346af7792b8592489732197" -dependencies = [ - "arrayvec", - "utf8parse", - "vte_generate_state_changes", -] - -[[package]] -name = "vte_generate_state_changes" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" -dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", -] - -[[package]] -name = "waker-fn" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" - -[[package]] -name = "walkdir" -version = "2.5.0" +name = "wait-timeout" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" dependencies = [ - "same-file", - "winapi-util", + "libc", ] [[package]] @@ -9901,87 +6454,45 @@ dependencies = [ "try-lock", ] -[[package]] -name = "warp" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e92e22e03ff1230c03a1a8ee37d2f89cd489e2e541b7550d6afad96faed169" -dependencies = [ - "bytes 1.5.0", - "futures-channel", - "futures-util", - "headers", - "http 0.2.12", - "hyper 0.14.28", - "log", - "mime", - "mime_guess", - "multer", - "percent-encoding", - "pin-project", - "rustls-pemfile", - "scoped-tls", - "serde", - "serde_json", - "serde_urlencoded", - "tokio 1.36.0", - "tokio-stream", - "tokio-tungstenite 0.20.1", - "tokio-util", - "tower-service", - "tracing", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" -[[package]] -name = "wasite" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" - [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2", + "quote", + "syn 2.0.79", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -9989,51 +6500,48 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ - "quote 1.0.35", + "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2", + "quote", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] -name = "wasm-streams" -version = "0.4.0" +name = "web-sys" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ - "futures-util", "js-sys", "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", ] [[package]] -name = "web-sys" -version = "0.3.69" +name = "web-time" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", @@ -10046,40 +6554,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] -name = "which" -version = "4.4.2" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix", -] - -[[package]] -name = "whoami" -version = "1.5.1" +name = "webpki-roots" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ - "redox_syscall 0.4.1", - "wasite", - "web-sys", + "rustls-pki-types", ] -[[package]] -name = "widestring" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" - -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -10090,27 +6572,12 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -[[package]] -name = "winapi-util" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" @@ -10123,7 +6590,37 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", ] [[package]] @@ -10141,7 +6638,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -10161,17 +6667,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - 
"windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -10182,9 +6689,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -10194,9 +6701,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -10206,9 +6713,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.4" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -10218,9 +6731,9 @@ checksum = 
"8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -10230,9 +6743,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -10242,9 +6755,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -10254,15 +6767,15 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.5.40" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +checksum = 
"36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -10273,51 +6786,10 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] -[[package]] -name = "winres" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b68db261ef59e9e52806f688020631e987592bd83619edccda9c47d42cde4f6c" -dependencies = [ - "toml", -] - -[[package]] -name = "wiremock" -version = "0.5.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13a3a53eaf34f390dd30d7b1b078287dd05df2aa2e21a589ccb80f5c7253c2e9" -dependencies = [ - "assert-json-diff", - "async-trait", - "base64 0.21.7", - "deadpool", - "futures 0.3.30", - "futures-timer", - "http-types", - "hyper 0.14.28", - "log", - "once_cell", - "regex", - "serde", - "serde_json", - "tokio 1.36.0", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "wyz" version = "0.5.1" @@ -10328,87 +6800,76 @@ dependencies = [ ] [[package]] -name = "x25519-dalek" -version = "2.0.1" +name = "xmlparser" +version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" -dependencies = [ - "curve25519-dalek 4.1.2", - "rand_core 0.6.4", - "serde", - "zeroize", -] +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" [[package]] -name = "x509-parser" -version = "0.15.1" +name = "zerocopy" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ - "asn1-rs", - "data-encoding", - "der-parser", - "lazy_static", - "nom 7.1.3", - "oid-registry", - "rusticata-macros", - "thiserror", - "time", + "byteorder", + "zerocopy-derive", ] [[package]] -name = "xattr" -version = "1.3.1" +name = "zerocopy-derive" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ - "libc", - "linux-raw-sys", - "rustix", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] -name = "xmlparser" -version = "0.13.6" +name = "zeroize" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] [[package]] -name = "zerocopy" -version = "0.7.32" +name = "zeroize_derive" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "zerocopy-derive", + "proc-macro2", + "quote", + "syn 2.0.79", ] [[package]] -name = "zerocopy-derive" -version = "0.7.32" +name = "zstd" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "zstd-safe", ] [[package]] -name = "zeroize" -version = "1.7.0" +name = "zstd-safe" 
+version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" dependencies = [ - "zeroize_derive", + "zstd-sys", ] [[package]] -name = "zeroize_derive" -version = "1.4.2" +name = "zstd-sys" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ - "proc-macro2 1.0.79", - "quote 1.0.35", - "syn 2.0.52", + "cc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 1b59b04c..7b2f464b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,36 +7,30 @@ license = "Apache-2.0" [workspace] resolver = "2" members = [ - "core", "observability", - "node", - "sdk", + "etcd", + "etcd-dbg", + "protocol", + "common", + "benchmark", + # Chains + "beaconchain", + "evm", "starknet", - "script", - "sinks/sink-common", - "sinks/sink-options-derive", - "sinks/sink-options-derive-tests", - "sinks/sink-console", - "sinks/sink-webhook", - "sinks/sink-mongo", - "sinks/sink-parquet", - "sinks/sink-postgres", - "runners/runner-common", - "runners/runner-local", - "operator", - "cli", ] +[profile.maxperf] +inherits = "release" +codegen-units = 1 +lto = "fat" +incremental = false + [workspace.dependencies] -assert_matches = "1.5.0" -anstyle = "1.0.1" -arrayvec = "0.7.2" -async-compression = "0.4" -async-stream = "0.3.5" -async-trait = "0.1.57" -byteorder = "1.4.3" -byte-unit = "4.0.14" -clap = { version = "4.3.3", features = [ +alloy-primitives = "0.8" +alloy-eips = "0.3.6" +bytes = { version = "1.7.1", features = ["serde"] } +byte-unit = "5.1.4" +clap = { version = "4.5.13", features = [ "derive", "env", "cargo", @@ -44,72 +38,59 @@ clap = { version = "4.3.3", features = [ "color", "unstable-styles", ] } 
-ctrlc = { version = "3.2.3", features = ["termination"] } -dirs = "4.0.0" -dotenvy = "0.15.7" -error-stack = "0.4.1" -futures = "0.3.23" -futures-util = "0.3.26" -governor = "0.6.0" +ctrlc = { version = "3.4.5", features = ["termination"] } +dirs = "5.0.1" +error-stack = "0.5.0" +etcd-client = { version = "0.14.0", features = ["tls", "tls-roots"] } +foyer = { git = "https://github.com/foyer-rs/foyer.git", rev = "d49c480" } # 0.12-dev +futures = "0.3.30" hex = { version = "0.4.3", features = ["serde"] } -http = "0.2.9" -hyper = "0.14.20" -lazy_static = "1.4.0" -jemallocator = { version = "0.5.0" } -mockall = "0.11.4" -opentelemetry = { version = "0.18.0", features = [ +memmap2 = "0.9.4" +mimalloc = "0.1.43" +opentelemetry = { version = "0.24.0", features = ["trace", "metrics"] } +opentelemetry_sdk = { version = "0.24.1", features = [ "trace", "metrics", "rt-tokio", ] } -opentelemetry-otlp = { version = "0.11.0", features = [ +opentelemetry-otlp = { version = "0.17.0", features = [ "trace", "metrics", "grpc-tonic", ] } -pbjson = "0.5.1" -pbjson-build = "0.5.1" -pbjson-types = "0.5.1" -pin-project = "1.0.12" -prost = "0.11.0" -reqwest = { version = "0.11.16", default-features = false, features = [ +pin-project = "1.1.5" +prost = "0.13.1" +prost-types = "0.13.1" +rand = "0.8.5" +reqwest = { version = "0.12.5", default-features = false, features = [ "json", - "serde_json", "rustls-tls", ] } -regex = "1.9.1" -serde = "1.0.155" -serde_json = "1.0.94" -starknet = { git = "https://github.com/xJonathanLEI/starknet-rs", rev = "7153d0e42" } -# starknet = { git = "https://github.com/fracek/starknet-rs", rev = "e6c4a21a7ce5" } -thiserror = "1.0.32" -tempfile = "3.3.0" +rkyv = { version = "0.8.8", features = ["unaligned"] } +roaring = "0.10.6" +serde = { version = "1.0.205", features = ["derive"] } +serde_json = "1.0.122" +serde_with = "3.9.0" +tempfile = "3.13.0" tempdir = "0.3.7" -testcontainers = { git = "https://github.com/fracek/testcontainers-rs.git", rev = "98a5557e" } 
-tokio = { version = "1.20.1", features = ["full"] } -tokio-stream = { version = "0.1.10", features = ["sync", "net"] } -tokio-util = "0.7.4" -tonic = { version = "0.9.0", features = ["tls", "tls-roots", "prost"] } -tonic-build = "0.9.0" -tonic-health = "0.9.0" -tonic-reflection = "0.9.0" -tower = "0.4.13" -tracing = { version = "0.1.36", features = [ +testcontainers = "0.22.0" +time = { version = "0.3.36", features = ["formatting", "local-offset"] } +tokio = { version = "1.39.2", features = ["full"] } +tokio-stream = { version = "0.1.15", features = ["sync", "net"] } +tokio-util = "0.7.11" +tonic = { version = "0.12.1", features = ["tls", "tls-roots", "prost"] } +tonic-build = "0.12.2" +tonic-health = "0.12.2" +tonic-reflection = "0.12.2" +tracing = { version = "0.1.40", features = [ "max_level_trace", "release_max_level_debug", ] } -tracing-futures = { version = "0.2.5", features = ["tokio", "futures-03"] } -tracing-opentelemetry = "0.18.0" -tracing-subscriber = { version = "0.3.15", features = [ +tracing-opentelemetry = "0.25.0" +tracing-subscriber = { version = "0.3.18", features = [ "std", "env-filter", "json", ] } -tracing-tree = "0.2.2" -quickcheck = "1.0.3" -quickcheck_macros = "1.0.0" -warp = "0.3.5" - -[patch.crates-io] -# https://github.com/tov/libffi-rs/pull/80 -libffi-sys = { git = "https://github.com/fracek/libffi-rs.git", rev = "653781aa9b7a7ac1682e7f8cb405a2e90afc341d" } +url = "2.5" +zstd = "0.13.2" diff --git a/beaconchain/Cargo.toml b/beaconchain/Cargo.toml new file mode 100644 index 00000000..8bb6bfa4 --- /dev/null +++ b/beaconchain/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "apibara-dna-beaconchain" +version = "0.0.0" +edition.workspace = true +authors.workspace = true +repository.workspace = true +license.workspace = true + +[lib] +name = "apibara_dna_beaconchain" +path = "src/lib.rs" + +[[bin]] +name = "apibara-dna-beaconchain" +path = "src/bin.rs" + +[dependencies] +alloy-primitives.workspace = true +alloy-eips.workspace = true 
+alloy-consensus = { version = "0.3.6", features = ["k256"] } +alloy-rpc-types-beacon = "0.3.6" +apibara-observability = { path = "../observability" } +apibara-dna-common = { path = "../common" } +apibara-dna-protocol = { path = "../protocol" } +byte-unit.workspace = true +clap.workspace = true +ctrlc.workspace = true +error-stack.workspace = true +futures.workspace = true +hex.workspace = true +mimalloc.workspace = true +prost.workspace = true +prost-types.workspace = true +reqwest.workspace = true +roaring.workspace = true +rkyv.workspace = true +serde.workspace = true +serde_json.workspace = true +serde_with.workspace = true +tonic.workspace = true +tokio.workspace = true +tokio-util.workspace = true +tracing.workspace = true +url.workspace = true diff --git a/beaconchain/src/bin.rs b/beaconchain/src/bin.rs new file mode 100644 index 00000000..01ed675c --- /dev/null +++ b/beaconchain/src/bin.rs @@ -0,0 +1,36 @@ +use apibara_dna_beaconchain::{cli::Cli, error::BeaconChainError}; +use apibara_observability::init_opentelemetry; +use clap::Parser; +use error_stack::{Result, ResultExt}; +use mimalloc::MiMalloc; +use tokio_util::sync::CancellationToken; +use tracing::info; + +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; + +#[tokio::main] +async fn main() -> Result<(), BeaconChainError> { + let args = Cli::parse(); + run_with_args(args).await +} + +async fn run_with_args(args: Cli) -> Result<(), BeaconChainError> { + init_opentelemetry(env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")) + .change_context(BeaconChainError) + .attach_printable("failed to initialize opentelemetry")?; + + let ct = CancellationToken::new(); + + ctrlc::set_handler({ + let ct = ct.clone(); + move || { + info!("SIGINT received"); + ct.cancel(); + } + }) + .change_context(BeaconChainError) + .attach_printable("failed to set SIGINT handler")?; + + args.run(ct).await +} diff --git a/beaconchain/src/cli/dbg/mod.rs b/beaconchain/src/cli/dbg/mod.rs new file mode 100644 index 
00000000..b9c8e23a --- /dev/null +++ b/beaconchain/src/cli/dbg/mod.rs @@ -0,0 +1,5 @@ +//! Debug commands. + +mod rpc; + +pub use self::rpc::DebugRpcCommand; diff --git a/beaconchain/src/cli/dbg/rpc.rs b/beaconchain/src/cli/dbg/rpc.rs new file mode 100644 index 00000000..3c8cf6f7 --- /dev/null +++ b/beaconchain/src/cli/dbg/rpc.rs @@ -0,0 +1,203 @@ +use std::{ + io::BufWriter, + path::{Path, PathBuf}, + time::Instant, +}; + +use clap::Subcommand; +use error_stack::{Result, ResultExt}; +use serde::{Deserialize, Serialize}; +use tracing::info; + +use crate::{ + cli::rpc::RpcArgs, + error::BeaconChainError, + provider::{http::BlockId, models}, +}; + +#[derive(Subcommand, Debug)] +#[allow(clippy::enum_variant_names)] +pub enum DebugRpcCommand { + /// Get the header of a block. + GetHeader { + #[clap(flatten)] + rpc: RpcArgs, + /// The block ID. + #[arg(long, env, default_value = "head")] + block_id: String, + /// Write the response to a JSON file. + #[arg(long)] + json: Option, + }, + /// Get the block. + GetBlock { + #[clap(flatten)] + rpc: RpcArgs, + /// The block ID. + #[arg(long, env, default_value = "head")] + block_id: String, + /// Write the response to a JSON file. + #[arg(long)] + json: Option, + }, + /// Get the blob sidecar. + GetBlobSidecar { + #[clap(flatten)] + rpc: RpcArgs, + /// The block ID. + #[arg(long, env, default_value = "head")] + block_id: String, + /// Write the response to a JSON file. + #[arg(long)] + json: Option, + }, + /// Get the validators. + GetValidators { + #[clap(flatten)] + rpc: RpcArgs, + /// The block ID. + #[arg(long, env, default_value = "head")] + block_id: String, + /// Write the response to a JSON file. + #[arg(long)] + json: Option, + }, +} + +/// Data needed to assemble a block. 
+#[derive(Serialize, Deserialize)] +pub(crate) enum JsonBlock { + Missed { slot: u64 }, + Proposed(ProposedBlock), +} + +#[derive(Serialize, Deserialize)] +pub(crate) struct ProposedBlock { + pub block: models::BeaconBlock, + pub blob_sidecars: Vec, + pub validators: Vec, +} + +impl DebugRpcCommand { + pub async fn run(self) -> Result<(), BeaconChainError> { + let rpc_provider = self + .rpc_provider() + .change_context(BeaconChainError) + .attach_printable("failed to create RPC provider")?; + + let block_id = self.block_id()?; + + let start = Instant::now(); + let elapsed = match self { + DebugRpcCommand::GetHeader { json, .. } => { + info!(block_id = ?block_id, "getting header"); + let header = rpc_provider + .get_header(block_id) + .await + .change_context(BeaconChainError)?; + info!("received {:#?}", header); + + let elapsed = start.elapsed(); + if let Some(json_path) = json { + write_json(json_path, &header)?; + } + elapsed + } + DebugRpcCommand::GetBlock { json, .. } => { + info!(block_id = ?block_id, "getting block"); + let block = rpc_provider + .get_block(block_id) + .await + .change_context(BeaconChainError)?; + info!("received {:#?}", block); + + let elapsed = start.elapsed(); + if let Some(json_path) = json { + write_json(json_path, &block)?; + } + elapsed + } + DebugRpcCommand::GetBlobSidecar { json, .. } => { + info!(block_id = ?block_id, "getting blob sidecar"); + let sidecar = rpc_provider + .get_blob_sidecar(block_id) + .await + .change_context(BeaconChainError)?; + info!("received {:#?}", sidecar); + + let elapsed = start.elapsed(); + if let Some(json_path) = json { + write_json(json_path, &sidecar)?; + } + elapsed + } + DebugRpcCommand::GetValidators { json, .. 
} => { + info!(block_id = ?block_id, "getting validators"); + let validators = rpc_provider + .get_validators(block_id) + .await + .change_context(BeaconChainError)?; + info!("received {:#?}", validators); + + let elapsed = start.elapsed(); + if let Some(json_path) = json { + write_json(json_path, &validators)?; + } + elapsed + } + }; + + info!(elapsed = ?elapsed, "debug rpc command completed"); + + Ok(()) + } + + fn rpc_provider(&self) -> Result { + match self { + DebugRpcCommand::GetHeader { rpc, .. } => rpc.to_beacon_api_provider(), + DebugRpcCommand::GetBlock { rpc, .. } => rpc.to_beacon_api_provider(), + DebugRpcCommand::GetBlobSidecar { rpc, .. } => rpc.to_beacon_api_provider(), + DebugRpcCommand::GetValidators { rpc, .. } => rpc.to_beacon_api_provider(), + } + } + + fn block_id(&self) -> Result { + let block_id = match self { + DebugRpcCommand::GetHeader { block_id, .. } => block_id, + DebugRpcCommand::GetBlock { block_id, .. } => block_id, + DebugRpcCommand::GetBlobSidecar { block_id, .. } => block_id, + DebugRpcCommand::GetValidators { block_id, .. 
} => block_id, + }; + + match block_id.as_str() { + "head" => Ok(BlockId::Head), + "finalized" => Ok(BlockId::Finalized), + str_value => { + if let Ok(slot) = str_value.parse::() { + return Ok(BlockId::Slot(slot)); + } + if let Ok(block_root) = str_value.parse::() { + return Ok(BlockId::BlockRoot(block_root)); + } + Err(BeaconChainError) + .attach_printable_lazy(|| format!("invalid block id: {}", str_value)) + } + } + } +} + +fn write_json(path: impl AsRef, data: &impl Serialize) -> Result<(), BeaconChainError> { + use std::fs::File; + use std::io::Write; + + let path = path.as_ref(); + let file = File::create(path).change_context(BeaconChainError)?; + let mut writer = BufWriter::new(file); + serde_json::to_writer_pretty(&mut writer, data) + .change_context(BeaconChainError) + .attach_printable("failed to write JSON") + .attach_printable_lazy(|| format!("path: {}", path.display()))?; + writer.flush().change_context(BeaconChainError)?; + + Ok(()) +} diff --git a/beaconchain/src/cli/mod.rs b/beaconchain/src/cli/mod.rs new file mode 100644 index 00000000..bdf10a2b --- /dev/null +++ b/beaconchain/src/cli/mod.rs @@ -0,0 +1,40 @@ +mod dbg; +mod rpc; +mod start; + +use clap::{Parser, Subcommand}; +use error_stack::Result; +use start::StartCommand; +use tokio_util::sync::CancellationToken; + +use crate::error::BeaconChainError; + +use self::dbg::DebugRpcCommand; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +pub struct Cli { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +pub enum Command { + /// Start the Beaconchain DNA server. + Start(Box), + /// Debug command for the Beacon RPC. 
+ #[command(name = "dbg-rpc")] + DebugRpc { + #[clap(subcommand)] + command: DebugRpcCommand, + }, +} + +impl Cli { + pub async fn run(self, ct: CancellationToken) -> Result<(), BeaconChainError> { + match self.command { + Command::Start(command) => command.run(ct).await, + Command::DebugRpc { command } => command.run().await, + } + } +} diff --git a/beaconchain/src/cli/rpc.rs b/beaconchain/src/cli/rpc.rs new file mode 100644 index 00000000..96e57368 --- /dev/null +++ b/beaconchain/src/cli/rpc.rs @@ -0,0 +1,89 @@ +use std::time::Duration; + +use clap::Args; +use error_stack::{Result, ResultExt}; +use reqwest::{ + header::{HeaderMap, HeaderName, HeaderValue}, + Url, +}; + +use crate::{ + error::BeaconChainError, + provider::http::{BeaconApiProvider, BeaconApiProviderOptions}, +}; + +#[derive(Args, Clone, Debug)] +pub struct RpcArgs { + /// Beacon RPC URL. + #[arg( + long = "rpc.url", + env = "BEACON_RPC_URL", + default_value = "http://localhost:3500" + )] + pub rpc_url: String, + + /// Timeout for normal requests. + #[arg( + long = "rpc.timeout-sec", + env = "BEACON_RPC_TIMEOUT_SEC", + default_value = "20" + )] + pub rpc_timeout_sec: u64, + + /// Timeout for validators requests. + #[arg( + long = "rpc.validators-timeout-sec", + env = "BEACON_RPC_VALIDATORS_TIMEOUT_SEC", + default_value = "180" + )] + pub rpc_validators_timeout_sec: u64, + + /// Headers to send with the requests. 
+ #[arg(long = "rpc.headers", env = "BEACON_RPC_HEADERS")] + pub rpc_headers: Vec, +} + +impl RpcArgs { + pub fn to_beacon_api_provider(&self) -> Result { + let url = self + .rpc_url + .parse::() + .change_context(BeaconChainError) + .attach_printable("failed to parse RPC URL") + .attach_printable_lazy(|| format!("url: {}", self.rpc_url))?; + + let headers = { + let mut headers = HeaderMap::default(); + + for kv in self.rpc_headers.iter() { + let (key, value) = kv + .split_once(':') + .ok_or(BeaconChainError) + .attach_printable("invalid header") + .attach_printable_lazy(|| format!("header: {}", kv))?; + + headers.insert( + key.parse::() + .change_context(BeaconChainError) + .attach_printable("invalid header name") + .attach_printable_lazy(|| format!("header name: {}", key))?, + value + .parse::() + .change_context(BeaconChainError) + .attach_printable("invalid header value") + .attach_printable_lazy(|| format!("header value: {}", value))?, + ); + } + + headers + }; + + let options = BeaconApiProviderOptions { + timeout: Duration::from_secs(self.rpc_timeout_sec), + validators_timeout: Duration::from_secs(self.rpc_validators_timeout_sec), + headers, + }; + + Ok(BeaconApiProvider::new(url, options)) + } +} diff --git a/beaconchain/src/cli/start.rs b/beaconchain/src/cli/start.rs new file mode 100644 index 00000000..37e073cd --- /dev/null +++ b/beaconchain/src/cli/start.rs @@ -0,0 +1,27 @@ +use apibara_dna_common::{run_server, StartArgs}; +use clap::Args; +use error_stack::{Result, ResultExt}; +use tokio_util::sync::CancellationToken; +use tracing::info; + +use crate::{cli::rpc::RpcArgs, error::BeaconChainError, BeaconChainChainSupport}; + +#[derive(Args, Debug)] +pub struct StartCommand { + #[clap(flatten)] + rpc: RpcArgs, + #[clap(flatten)] + start: StartArgs, +} + +impl StartCommand { + pub async fn run(self, ct: CancellationToken) -> Result<(), BeaconChainError> { + info!("Starting Beaconchain DNA server"); + let provider = self.rpc.to_beacon_api_provider()?; + let 
beaconchain_chain = BeaconChainChainSupport::new(provider); + + run_server(beaconchain_chain, self.start, ct) + .await + .change_context(BeaconChainError) + } +} diff --git a/beaconchain/src/error.rs b/beaconchain/src/error.rs new file mode 100644 index 00000000..1e38a297 --- /dev/null +++ b/beaconchain/src/error.rs @@ -0,0 +1,10 @@ +#[derive(Debug, Clone)] +pub struct BeaconChainError; + +impl error_stack::Context for BeaconChainError {} + +impl std::fmt::Display for BeaconChainError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Beacon Chain error") + } +} diff --git a/beaconchain/src/filter/blob.rs b/beaconchain/src/filter/blob.rs new file mode 100644 index 00000000..0ff3dcf7 --- /dev/null +++ b/beaconchain/src/filter/blob.rs @@ -0,0 +1,23 @@ +use apibara_dna_common::query::Filter; +use apibara_dna_protocol::beaconchain; + +use crate::fragment::{BLOB_FRAGMENT_ID, TRANSACTION_FRAGMENT_ID}; + +use super::helpers::FragmentFilterExt; + +impl FragmentFilterExt for beaconchain::BlobFilter { + fn compile_to_filter(&self) -> tonic::Result { + let mut joins = Vec::new(); + + if let Some(true) = self.include_transaction { + joins.push(TRANSACTION_FRAGMENT_ID); + } + + Ok(Filter { + filter_id: self.id, + fragment_id: BLOB_FRAGMENT_ID, + conditions: Vec::default(), + joins, + }) + } +} diff --git a/beaconchain/src/filter/helpers.rs b/beaconchain/src/filter/helpers.rs new file mode 100644 index 00000000..1a264c03 --- /dev/null +++ b/beaconchain/src/filter/helpers.rs @@ -0,0 +1,9 @@ +use apibara_dna_common::query::{BlockFilter, Filter}; + +pub trait BlockFilterExt { + fn compile_to_block_filter(&self) -> tonic::Result; +} + +pub trait FragmentFilterExt { + fn compile_to_filter(&self) -> tonic::Result; +} diff --git a/beaconchain/src/filter/mod.rs b/beaconchain/src/filter/mod.rs new file mode 100644 index 00000000..e2c8c1bd --- /dev/null +++ b/beaconchain/src/filter/mod.rs @@ -0,0 +1,72 @@ +mod blob; +mod helpers; +mod transaction; +mod 
validator; + +use apibara_dna_common::{data_stream::BlockFilterFactory, query::BlockFilter}; +use apibara_dna_protocol::beaconchain; +use prost::Message; + +use self::helpers::{BlockFilterExt, FragmentFilterExt}; + +pub struct BeaconChainFilterFactory; + +impl BlockFilterFactory for BeaconChainFilterFactory { + fn create_block_filter( + &self, + filters: &[Vec], + ) -> tonic::Result, tonic::Status> { + let proto_filters = filters + .iter() + .map(|bytes| beaconchain::Filter::decode(bytes.as_slice())) + .collect::, _>>() + .map_err(|_| tonic::Status::invalid_argument("failed to decode filter"))?; + + if proto_filters.is_empty() { + return Err(tonic::Status::invalid_argument("no filters provided")); + } + + if proto_filters.len() > 5 { + return Err(tonic::Status::invalid_argument(format!( + "too many filters ({} > 5)", + proto_filters.len(), + ))); + } + + proto_filters + .iter() + .map(BlockFilterExt::compile_to_block_filter) + .collect() + } +} + +impl BlockFilterExt for beaconchain::Filter { + fn compile_to_block_filter(&self) -> tonic::Result { + let mut block_filter = BlockFilter::default(); + + if self.header.map(|h| h.always()).unwrap_or(false) { + block_filter.set_always_include_header(true); + } + + for filter in self.transactions.iter() { + let filter = filter.compile_to_filter()?; + block_filter.add_filter(filter); + } + + for filter in self.blobs.iter() { + let filter = filter.compile_to_filter()?; + block_filter.add_filter(filter); + } + + for filter in self.validators.iter() { + let filter = filter.compile_to_filter()?; + block_filter.add_filter(filter); + } + + if !block_filter.always_include_header && block_filter.is_empty() { + return Err(tonic::Status::invalid_argument("no filters provided")); + } + + Ok(block_filter) + } +} diff --git a/beaconchain/src/filter/transaction.rs b/beaconchain/src/filter/transaction.rs new file mode 100644 index 00000000..5b7bc0fb --- /dev/null +++ b/beaconchain/src/filter/transaction.rs @@ -0,0 +1,52 @@ +use 
apibara_dna_common::{ + index::ScalarValue, + query::{Condition, Filter}, +}; +use apibara_dna_protocol::beaconchain; + +use crate::fragment::{ + BLOB_FRAGMENT_ID, INDEX_TRANSACTION_BY_CREATE, INDEX_TRANSACTION_BY_FROM_ADDRESS, + INDEX_TRANSACTION_BY_TO_ADDRESS, TRANSACTION_FRAGMENT_ID, +}; + +use super::helpers::FragmentFilterExt; + +impl FragmentFilterExt for beaconchain::TransactionFilter { + fn compile_to_filter(&self) -> tonic::Result { + let mut conditions = Vec::new(); + + if let Some(from) = self.from.as_ref() { + conditions.push(Condition { + index_id: INDEX_TRANSACTION_BY_FROM_ADDRESS, + key: ScalarValue::B160(from.to_bytes()), + }); + } + + if let Some(to) = self.to.as_ref() { + conditions.push(Condition { + index_id: INDEX_TRANSACTION_BY_TO_ADDRESS, + key: ScalarValue::B160(to.to_bytes()), + }); + } + + if let Some(true) = self.create { + conditions.push(Condition { + index_id: INDEX_TRANSACTION_BY_CREATE, + key: ScalarValue::Bool(true), + }); + } + + let mut joins = Vec::new(); + + if let Some(true) = self.include_blob { + joins.push(BLOB_FRAGMENT_ID); + } + + Ok(Filter { + filter_id: self.id, + fragment_id: TRANSACTION_FRAGMENT_ID, + conditions, + joins, + }) + } +} diff --git a/beaconchain/src/filter/validator.rs b/beaconchain/src/filter/validator.rs new file mode 100644 index 00000000..ea35dbbc --- /dev/null +++ b/beaconchain/src/filter/validator.rs @@ -0,0 +1,36 @@ +use apibara_dna_common::{ + index::ScalarValue, + query::{Condition, Filter}, +}; +use apibara_dna_protocol::beaconchain; + +use crate::fragment::{INDEX_VALIDATOR_BY_INDEX, INDEX_VALIDATOR_BY_STATUS, VALIDATOR_FRAGMENT_ID}; + +use super::helpers::FragmentFilterExt; + +impl FragmentFilterExt for beaconchain::ValidatorFilter { + fn compile_to_filter(&self) -> tonic::Result { + let mut conditions = Vec::new(); + + if let Some(index) = self.validator_index { + conditions.push(Condition { + index_id: INDEX_VALIDATOR_BY_INDEX, + key: ScalarValue::Uint32(index), + }); + } + + if let 
Some(status) = self.status { + conditions.push(Condition { + index_id: INDEX_VALIDATOR_BY_STATUS, + key: ScalarValue::Int32(status), + }); + } + + Ok(Filter { + filter_id: self.id, + fragment_id: VALIDATOR_FRAGMENT_ID, + conditions, + joins: Vec::default(), + }) + } +} diff --git a/beaconchain/src/fragment.rs b/beaconchain/src/fragment.rs new file mode 100644 index 00000000..dd4ceba5 --- /dev/null +++ b/beaconchain/src/fragment.rs @@ -0,0 +1,19 @@ +//! Fragment constants. + +// Make sure the fragment IDs match the field tags in the protobuf Block message. + +pub const TRANSACTION_FRAGMENT_ID: u8 = 2; +pub const TRANSACTION_FRAGMENT_NAME: &str = "transaction"; + +pub const VALIDATOR_FRAGMENT_ID: u8 = 3; +pub const VALIDATOR_FRAGMENT_NAME: &str = "validator"; + +pub const BLOB_FRAGMENT_ID: u8 = 4; +pub const BLOB_FRAGMENT_NAME: &str = "blob"; + +pub const INDEX_TRANSACTION_BY_FROM_ADDRESS: u8 = 0; +pub const INDEX_TRANSACTION_BY_TO_ADDRESS: u8 = 1; +pub const INDEX_TRANSACTION_BY_CREATE: u8 = 2; + +pub const INDEX_VALIDATOR_BY_INDEX: u8 = 0; +pub const INDEX_VALIDATOR_BY_STATUS: u8 = 1; diff --git a/beaconchain/src/ingestion.rs b/beaconchain/src/ingestion.rs new file mode 100644 index 00000000..8b5c8756 --- /dev/null +++ b/beaconchain/src/ingestion.rs @@ -0,0 +1,561 @@ +//! Block ingestion helpers. 
+ +use alloy_eips::eip2718::Decodable2718; +use apibara_dna_common::{ + chain::BlockInfo, + fragment::{ + Block, BodyFragment, HeaderFragment, Index, IndexFragment, IndexGroupFragment, Join, + JoinFragment, JoinGroupFragment, + }, + index::{BitmapIndexBuilder, ScalarValue}, + ingestion::{BlockIngestion, IngestionError}, + join::{JoinToManyIndex, JoinToManyIndexBuilder, JoinToOneIndex, JoinToOneIndexBuilder}, + Cursor, Hash, +}; +use error_stack::{FutureExt, Result, ResultExt}; +use prost::Message; +use tracing::Instrument; + +use crate::{ + fragment::{ + BLOB_FRAGMENT_ID, BLOB_FRAGMENT_NAME, INDEX_TRANSACTION_BY_CREATE, + INDEX_TRANSACTION_BY_FROM_ADDRESS, INDEX_TRANSACTION_BY_TO_ADDRESS, + INDEX_VALIDATOR_BY_INDEX, INDEX_VALIDATOR_BY_STATUS, TRANSACTION_FRAGMENT_ID, + TRANSACTION_FRAGMENT_NAME, VALIDATOR_FRAGMENT_ID, VALIDATOR_FRAGMENT_NAME, + }, + proto::{FallibleModelExt, ModelExt}, + provider::{ + http::{BeaconApiErrorExt, BeaconApiProvider, BlockId}, + models::{self, BeaconCursorExt}, + }, +}; + +#[derive(Clone)] +pub struct BeaconChainBlockIngestion { + provider: BeaconApiProvider, +} + +impl BeaconChainBlockIngestion { + pub fn new(provider: BeaconApiProvider) -> Self { + Self { provider } + } + + async fn ingest_block_by_id( + &self, + block_id: BlockId, + ) -> Result, IngestionError> { + // Fetch all data using the block root to avoid issues with reorgs. 
+ let block_root = match self.provider.get_block_root(block_id).await { + Ok(header) => header, + Err(err) if err.is_not_found() => return Ok(None), + Err(err) => { + return Err(err).change_context(IngestionError::RpcRequest); + } + }; + + let block_id = BlockId::BlockRoot(block_root.data.root); + + let block = tokio::spawn({ + let provider = self.provider.clone(); + let block_id = block_id.clone(); + async move { + provider + .get_block(block_id.clone()) + .await + .change_context(IngestionError::RpcRequest) + .attach_printable("failed to get block") + .attach_printable_lazy(|| format!("block id: {block_id:?}")) + } + }) + .instrument(tracing::info_span!("beaconchain_get_block")); + + let blob_sidecar = tokio::spawn({ + let provider = self.provider.clone(); + let block_id = block_id.clone(); + async move { + provider + .get_blob_sidecar(block_id.clone()) + .await + .change_context(IngestionError::RpcRequest) + .attach_printable("failed to get blob sidecar") + .attach_printable_lazy(|| format!("block id: {block_id:?}")) + } + }) + .instrument(tracing::info_span!("beaconchain_get_blob_sidecar")); + + let validators = tokio::spawn({ + let provider = self.provider.clone(); + let block_id = block_id.clone(); + async move { + match provider.get_validators(block_id.clone()).await { + Ok(response) => Ok(response.data), + Err(err) if err.is_not_found() => Ok(Vec::new()), + Err(err) => Err(err) + .change_context(IngestionError::RpcRequest) + .attach_printable("failed to get validators") + .attach_printable_lazy(|| format!("block id: {block_id:?}")), + } + } + }) + .instrument(tracing::info_span!("beaconchain_get_validators")); + + let block = block.await.change_context(IngestionError::RpcRequest)??; + + let mut blobs = blob_sidecar + .await + .change_context(IngestionError::RpcRequest)?? 
+ .data; + let mut validators = validators + .await + .change_context(IngestionError::RpcRequest)??; + + let mut block = block.data.message; + + let block_info = BlockInfo { + number: block.slot, + hash: Hash(block_root.data.root.to_vec()), + parent: Hash(block.parent_root.to_vec()), + }; + + let transactions = if let Some(ref mut execution_payload) = block.body.execution_payload { + std::mem::take(&mut execution_payload.transactions) + } else { + Vec::new() + }; + + validators.sort_by_key(|v| v.index); + blobs.sort_by_key(|b| b.index); + + let header_fragment = { + let header = block.to_proto(); + HeaderFragment { + data: header.encode_to_vec(), + } + }; + + let (body, index, join) = collect_block_body_and_index(&transactions, &validators, &blobs)?; + + let block = Block { + header: header_fragment, + index, + join, + body, + }; + + Ok((block_info, block).into()) + } + + async fn get_block_info_for_missed_slot( + &self, + block_number: u64, + ) -> Result { + let parent_hash = match self + .provider + .get_block_root(BlockId::Slot(block_number - 1)) + .await + { + Ok(response) => response.data.root, + Err(err) if err.is_not_found() => models::B256::default(), + Err(err) => { + return Err(err).change_context(IngestionError::RpcRequest); + } + }; + + let hash = Hash([0; 32].to_vec()); + let parent = Hash(parent_hash.to_vec()); + + let block_info = BlockInfo { + number: block_number, + hash, + parent, + }; + + Ok(block_info) + } +} + +impl BlockIngestion for BeaconChainBlockIngestion { + #[tracing::instrument("beaconchain_get_head_cursor", skip(self), err(Debug))] + async fn get_head_cursor(&self) -> Result { + let cursor = self + .provider + .get_header(BlockId::Head) + .await + .change_context(IngestionError::RpcRequest) + .attach_printable("failed to get head header")? 
+ .cursor(); + Ok(cursor) + } + + #[tracing::instrument("beaconchain_get_finalized_cursor", skip(self), err(Debug))] + async fn get_finalized_cursor(&self) -> Result { + let cursor = self + .provider + .get_header(BlockId::Finalized) + .await + .change_context(IngestionError::RpcRequest) + .attach_printable("failed to get finalized header")? + .cursor(); + Ok(cursor) + } + + #[tracing::instrument("beaconchain_get_block_info_by_number", skip(self), err(Debug))] + async fn get_block_info_by_number( + &self, + block_number: u64, + ) -> Result { + let header = match self.provider.get_header(BlockId::Slot(block_number)).await { + Ok(header) => header, + Err(err) if err.is_not_found() => { + return self.get_block_info_for_missed_slot(block_number).await + } + Err(err) => { + return Err(err) + .change_context(IngestionError::RpcRequest) + .attach_printable("failed to get header by number") + .attach_printable_lazy(|| format!("block number: {}", block_number)) + } + }; + + let hash = header.data.root; + let parent = header.data.header.message.parent_root; + let number = header.data.header.message.slot; + + Ok(BlockInfo { + number, + hash: Hash(hash.0.to_vec()), + parent: Hash(parent.0.to_vec()), + }) + } + + #[tracing::instrument("beaconchain_ingest_block_by_number", skip(self), err(Debug))] + async fn ingest_block_by_number( + &self, + block_number: u64, + ) -> Result<(BlockInfo, Block), IngestionError> { + let block = self + .ingest_block_by_id(BlockId::Slot(block_number)) + .change_context(IngestionError::RpcRequest) + .attach_printable("failed to get block by number") + .attach_printable_lazy(|| format!("block number: {}", block_number)) + .await?; + + if let Some((block_info, block)) = block { + return Ok((block_info, block)); + } + + if block_number == 0 { + return Err(IngestionError::BlockNotFound).attach_printable("genesis block not found"); + } + + let block_info = self.get_block_info_for_missed_slot(block_number).await?; + + // Missed slots have no data and no 
indices. + let header_fragment = HeaderFragment { data: Vec::new() }; + + let index_fragment = IndexGroupFragment { + indexes: vec![ + IndexFragment { + fragment_id: TRANSACTION_FRAGMENT_ID, + range_start: 0, + range_len: 0, + indexes: Vec::default(), + }, + IndexFragment { + fragment_id: VALIDATOR_FRAGMENT_ID, + range_start: 0, + range_len: 0, + indexes: Vec::default(), + }, + IndexFragment { + fragment_id: BLOB_FRAGMENT_ID, + range_start: 0, + range_len: 0, + indexes: Vec::default(), + }, + ], + }; + + let join_fragment = JoinGroupFragment { + joins: vec![ + JoinFragment { + fragment_id: TRANSACTION_FRAGMENT_ID, + joins: vec![Join { + to_fragment_id: BLOB_FRAGMENT_ID, + index: JoinToManyIndex::default().into(), + }], + }, + JoinFragment { + fragment_id: VALIDATOR_FRAGMENT_ID, + joins: Vec::default(), + }, + JoinFragment { + fragment_id: BLOB_FRAGMENT_ID, + joins: vec![Join { + to_fragment_id: TRANSACTION_FRAGMENT_ID, + index: JoinToOneIndex::default().into(), + }], + }, + ], + }; + + let body_fragments = vec![ + BodyFragment { + fragment_id: TRANSACTION_FRAGMENT_ID, + name: TRANSACTION_FRAGMENT_NAME.to_string(), + data: Vec::default(), + }, + BodyFragment { + fragment_id: VALIDATOR_FRAGMENT_ID, + name: VALIDATOR_FRAGMENT_NAME.to_string(), + data: Vec::default(), + }, + BodyFragment { + fragment_id: BLOB_FRAGMENT_ID, + name: BLOB_FRAGMENT_NAME.to_string(), + data: Vec::default(), + }, + ]; + + let block = Block { + header: header_fragment, + index: index_fragment, + body: body_fragments, + join: join_fragment, + }; + + Ok((block_info, block)) + } +} + +pub fn collect_block_body_and_index( + transactions: &[models::Bytes], + validators: &[models::Validator], + blobs: &[models::BlobSidecar], +) -> Result<(Vec, IndexGroupFragment, JoinGroupFragment), IngestionError> { + let transactions = transactions + .iter() + .map(|bytes| decode_transaction(bytes)) + .collect::, _>>()?; + + let mut block_transactions = Vec::new(); + let mut block_validators = Vec::new(); + let 
mut block_blobs = Vec::new(); + + let mut index_transaction_by_from_address = BitmapIndexBuilder::default(); + let mut index_transaction_by_to_address = BitmapIndexBuilder::default(); + let mut index_transaction_by_create = BitmapIndexBuilder::default(); + let mut join_transaction_to_blobs = JoinToManyIndexBuilder::default(); + + let mut index_validator_by_index = BitmapIndexBuilder::default(); + let mut index_validator_by_status = BitmapIndexBuilder::default(); + + let mut join_blob_to_transaction = JoinToOneIndexBuilder::default(); + + for (transaction_index, transaction) in transactions.into_iter().enumerate() { + let transaction_index = transaction_index as u32; + + let mut transaction = transaction.to_proto()?; + transaction.transaction_index = transaction_index; + + if let Some(from) = transaction.from { + index_transaction_by_from_address + .insert(ScalarValue::B160(from.to_bytes()), transaction_index); + } + + match transaction.to { + Some(to) => { + index_transaction_by_to_address + .insert(ScalarValue::B160(to.to_bytes()), transaction_index); + index_transaction_by_create.insert(ScalarValue::Bool(false), transaction_index); + } + None => { + index_transaction_by_create.insert(ScalarValue::Bool(true), transaction_index); + } + } + + block_transactions.push(transaction); + } + + for (validator_offset, validator) in validators.iter().enumerate() { + let validator_offset = validator_offset as u32; + let validator = validator.to_proto(); + + index_validator_by_index.insert( + ScalarValue::Uint32(validator.validator_index), + validator_offset, + ); + + index_validator_by_status.insert(ScalarValue::Int32(validator.status), validator_offset); + + block_validators.push(validator); + } + + for blob in blobs.iter() { + let mut blob = blob.to_proto(); + + let Some(tx) = block_transactions.iter().find(|tx| { + tx.blob_versioned_hashes + .contains(&blob.blob_hash.unwrap_or_default()) + }) else { + return Err(IngestionError::Model) + .attach_printable("no transaction 
found for blob") + .attach_printable_lazy(|| { + format!("blob hash: {}", blob.blob_hash.unwrap_or_default()) + }); + }; + + blob.transaction_index = tx.transaction_index; + blob.transaction_hash = tx.transaction_hash; + + join_blob_to_transaction.insert(blob.blob_index, tx.transaction_index); + join_transaction_to_blobs.insert(tx.transaction_index, blob.blob_index); + + block_blobs.push(blob); + } + + let transaction_index = { + let index_transaction_by_from_address = Index { + index_id: INDEX_TRANSACTION_BY_FROM_ADDRESS, + index: index_transaction_by_from_address + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_transaction_by_to_address = Index { + index_id: INDEX_TRANSACTION_BY_TO_ADDRESS, + index: index_transaction_by_to_address + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_transaction_by_create = Index { + index_id: INDEX_TRANSACTION_BY_CREATE, + index: index_transaction_by_create + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + IndexFragment { + fragment_id: TRANSACTION_FRAGMENT_ID, + range_start: 0, + range_len: block_transactions.len() as u32, + indexes: vec![ + index_transaction_by_from_address, + index_transaction_by_to_address, + index_transaction_by_create, + ], + } + }; + + let transaction_join = { + let join_transaction_to_blobs = Join { + to_fragment_id: BLOB_FRAGMENT_ID, + index: join_transaction_to_blobs + .build() + .change_context(IngestionError::Indexing)? 
+ .into(), + }; + + JoinFragment { + fragment_id: TRANSACTION_FRAGMENT_ID, + joins: vec![join_transaction_to_blobs], + } + }; + + let transaction_fragment = BodyFragment { + fragment_id: TRANSACTION_FRAGMENT_ID, + name: TRANSACTION_FRAGMENT_NAME.to_string(), + data: block_transactions + .iter() + .map(Message::encode_to_vec) + .collect(), + }; + + let validator_index = { + let index_validator_by_index = Index { + index_id: INDEX_VALIDATOR_BY_INDEX, + index: index_validator_by_index + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_validator_by_status = Index { + index_id: INDEX_VALIDATOR_BY_STATUS, + index: index_validator_by_status + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + IndexFragment { + fragment_id: VALIDATOR_FRAGMENT_ID, + range_start: 0, + range_len: block_validators.len() as u32, + indexes: vec![index_validator_by_index, index_validator_by_status], + } + }; + + let validator_join = JoinFragment { + fragment_id: VALIDATOR_FRAGMENT_ID, + joins: Vec::default(), + }; + + let validator_fragment = BodyFragment { + fragment_id: VALIDATOR_FRAGMENT_ID, + name: VALIDATOR_FRAGMENT_NAME.to_string(), + data: block_validators + .iter() + .map(Message::encode_to_vec) + .collect(), + }; + + let blob_index = IndexFragment { + fragment_id: BLOB_FRAGMENT_ID, + range_start: 0, + range_len: block_blobs.len() as u32, + indexes: Vec::new(), + }; + + let blob_join = { + let join_blob_to_transaction = Join { + to_fragment_id: TRANSACTION_FRAGMENT_ID, + index: join_blob_to_transaction.build().into(), + }; + + JoinFragment { + fragment_id: BLOB_FRAGMENT_ID, + joins: vec![join_blob_to_transaction], + } + }; + + let blob_fragment = BodyFragment { + fragment_id: BLOB_FRAGMENT_ID, + name: BLOB_FRAGMENT_NAME.to_string(), + data: block_blobs.iter().map(Message::encode_to_vec).collect(), + }; + + let index_group = IndexGroupFragment { + indexes: vec![transaction_index, validator_index, blob_index], + }; + + let join_group 
= JoinGroupFragment { + joins: vec![transaction_join, validator_join, blob_join], + }; + + Ok(( + vec![transaction_fragment, validator_fragment, blob_fragment], + index_group, + join_group, + )) +} + +pub fn decode_transaction(mut bytes: &[u8]) -> Result { + models::TxEnvelope::network_decode(&mut bytes) + .change_context(IngestionError::Model) + .attach_printable("failed to decode EIP 2718 transaction") +} diff --git a/beaconchain/src/lib.rs b/beaconchain/src/lib.rs new file mode 100644 index 00000000..77005452 --- /dev/null +++ b/beaconchain/src/lib.rs @@ -0,0 +1,56 @@ +use apibara_dna_common::{fragment::FragmentInfo, ChainSupport}; +use filter::BeaconChainFilterFactory; +use fragment::{ + BLOB_FRAGMENT_ID, BLOB_FRAGMENT_NAME, TRANSACTION_FRAGMENT_ID, TRANSACTION_FRAGMENT_NAME, + VALIDATOR_FRAGMENT_ID, VALIDATOR_FRAGMENT_NAME, +}; +use ingestion::BeaconChainBlockIngestion; +use provider::http::BeaconApiProvider; + +pub mod cli; +pub mod error; +pub mod filter; +pub mod fragment; +pub mod ingestion; +pub mod proto; +pub mod provider; + +pub struct BeaconChainChainSupport { + provider: BeaconApiProvider, +} + +impl BeaconChainChainSupport { + pub fn new(provider: BeaconApiProvider) -> Self { + Self { provider } + } +} + +impl ChainSupport for BeaconChainChainSupport { + type BlockIngestion = BeaconChainBlockIngestion; + type BlockFilterFactory = BeaconChainFilterFactory; + + fn fragment_info(&self) -> Vec { + vec![ + FragmentInfo { + fragment_id: TRANSACTION_FRAGMENT_ID, + name: TRANSACTION_FRAGMENT_NAME.to_string(), + }, + FragmentInfo { + fragment_id: VALIDATOR_FRAGMENT_ID, + name: VALIDATOR_FRAGMENT_NAME.to_string(), + }, + FragmentInfo { + fragment_id: BLOB_FRAGMENT_ID, + name: BLOB_FRAGMENT_NAME.to_string(), + }, + ] + } + + fn block_filter_factory(&self) -> Self::BlockFilterFactory { + BeaconChainFilterFactory + } + + fn block_ingestion(&self) -> Self::BlockIngestion { + BeaconChainBlockIngestion::new(self.provider.clone()) + } +} diff --git 
a/beaconchain/src/proto.rs b/beaconchain/src/proto.rs new file mode 100644 index 00000000..a33c64ac --- /dev/null +++ b/beaconchain/src/proto.rs @@ -0,0 +1,356 @@ +use apibara_dna_common::ingestion::IngestionError; +use apibara_dna_protocol::beaconchain; +use error_stack::{Result, ResultExt}; + +use crate::provider::models; + +pub trait ModelExt { + type Proto; + + fn to_proto(&self) -> Self::Proto; +} + +pub trait FallibleModelExt { + type Proto; + + fn to_proto(&self) -> Result; +} + +trait TxKindExt { + fn to_option(self) -> Option; +} + +impl ModelExt for models::BeaconBlock { + type Proto = beaconchain::BlockHeader; + + fn to_proto(&self) -> Self::Proto { + beaconchain::BlockHeader { + slot: self.slot, + proposer_index: self.proposer_index, + parent_root: self.parent_root.to_proto().into(), + state_root: self.state_root.to_proto().into(), + randao_reveal: self.body.randao_reveal.to_vec(), + deposit_count: self.body.eth1_data.deposit_count, + deposit_root: self.body.eth1_data.deposit_root.to_proto().into(), + block_hash: self.body.eth1_data.block_hash.to_proto().into(), + graffiti: self.body.graffiti.to_proto().into(), + execution_payload: self.body.execution_payload.as_ref().map(ModelExt::to_proto), + blob_kzg_commitments: self + .body + .blob_kzg_commitments + .iter() + .map(ModelExt::to_proto) + .collect(), + } + } +} + +impl ModelExt for models::ExecutionPayload { + type Proto = beaconchain::ExecutionPayload; + + fn to_proto(&self) -> Self::Proto { + let timestamp = prost_types::Timestamp { + seconds: self.timestamp as i64, + nanos: 0, + }; + + beaconchain::ExecutionPayload { + parent_hash: self.parent_hash.to_proto().into(), + fee_recipient: self.fee_recipient.to_proto().into(), + state_root: self.state_root.to_proto().into(), + receipts_root: self.receipts_root.to_proto().into(), + logs_bloom: self.logs_bloom.to_vec(), + prev_randao: self.prev_randao.to_proto().into(), + block_number: self.block_number, + timestamp: timestamp.into(), + } + } +} + +impl 
FallibleModelExt for models::TxEnvelope { + type Proto = beaconchain::Transaction; + + fn to_proto(&self) -> Result { + match self { + models::TxEnvelope::Legacy(tx) => tx.to_proto(), + models::TxEnvelope::Eip2930(tx) => tx.to_proto(), + models::TxEnvelope::Eip1559(tx) => tx.to_proto(), + models::TxEnvelope::Eip4844(tx) => tx.to_proto(), + _ => Err(IngestionError::Model).attach_printable("unknown transaction type"), + } + } +} + +impl FallibleModelExt for models::Signed { + type Proto = beaconchain::Transaction; + + fn to_proto(&self) -> Result { + let from = self + .recover_signer() + .change_context(IngestionError::Model) + .attach_printable("failed to recover sender of legacy transaction")?; + let tx = self.tx(); + + Ok(beaconchain::Transaction { + filter_ids: Vec::default(), + transaction_type: models::TxType::Legacy as u64, + transaction_index: u32::MAX, + transaction_hash: self.hash().to_proto().into(), + nonce: tx.nonce, + from: from.to_proto().into(), + to: tx.to.to_option(), + value: tx.value.to_proto().into(), + gas_price: tx.gas_price.to_proto().into(), + gas_limit: tx.gas_limit.to_proto().into(), + max_fee_per_gas: None, + max_priority_fee_per_gas: None, + max_fee_per_blob_gas: None, + input: tx.input.to_vec(), + signature: self.signature().to_proto().into(), + chain_id: tx.chain_id, + access_list: Vec::default(), + blob_versioned_hashes: Vec::default(), + }) + } +} + +impl FallibleModelExt for models::Signed { + type Proto = beaconchain::Transaction; + + fn to_proto(&self) -> Result { + let from = self + .recover_signer() + .change_context(IngestionError::Model) + .attach_printable("failed to recover sender of EIP-2930 transaction")?; + let tx = self.tx(); + + Ok(beaconchain::Transaction { + filter_ids: Vec::default(), + transaction_type: models::TxType::Eip2930 as u64, + transaction_index: u32::MAX, + transaction_hash: self.hash().to_proto().into(), + nonce: tx.nonce, + from: from.to_proto().into(), + to: tx.to.to_option(), + value: 
tx.value.to_proto().into(), + gas_price: tx.gas_price.to_proto().into(), + gas_limit: tx.gas_limit.to_proto().into(), + max_fee_per_gas: None, + max_priority_fee_per_gas: None, + max_fee_per_blob_gas: None, + input: tx.input.to_vec(), + signature: self.signature().to_proto().into(), + chain_id: tx.chain_id.into(), + access_list: tx.access_list.iter().map(ModelExt::to_proto).collect(), + blob_versioned_hashes: Vec::default(), + }) + } +} + +impl FallibleModelExt for models::Signed { + type Proto = beaconchain::Transaction; + + fn to_proto(&self) -> Result { + let from = self + .recover_signer() + .change_context(IngestionError::Model) + .attach_printable("failed to recover sender of EIP-1559 transaction")?; + let tx = self.tx(); + + Ok(beaconchain::Transaction { + filter_ids: Vec::default(), + transaction_type: models::TxType::Eip1559 as u64, + transaction_index: u32::MAX, + transaction_hash: self.hash().to_proto().into(), + nonce: tx.nonce, + from: from.to_proto().into(), + to: tx.to.to_option(), + value: tx.value.to_proto().into(), + gas_price: None, + gas_limit: tx.gas_limit.to_proto().into(), + max_fee_per_gas: tx.max_fee_per_gas.to_proto().into(), + max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to_proto().into(), + max_fee_per_blob_gas: None, + input: tx.input.to_vec(), + signature: self.signature().to_proto().into(), + chain_id: tx.chain_id.into(), + access_list: tx.access_list.iter().map(ModelExt::to_proto).collect(), + blob_versioned_hashes: Vec::default(), + }) + } +} + +impl FallibleModelExt for models::Signed { + type Proto = beaconchain::Transaction; + + fn to_proto(&self) -> Result { + let from = self + .recover_signer() + .change_context(IngestionError::Model) + .attach_printable("failed to recover sender of EIP-4844 transaction")?; + let tx = self.tx().tx(); + + Ok(beaconchain::Transaction { + filter_ids: Vec::default(), + transaction_type: models::TxType::Eip4844 as u64, + transaction_index: u32::MAX, + transaction_hash: 
self.hash().to_proto().into(), + nonce: tx.nonce, + from: from.to_proto().into(), + to: tx.to.to_proto().into(), + value: tx.value.to_proto().into(), + gas_price: None, + gas_limit: tx.gas_limit.to_proto().into(), + max_fee_per_gas: tx.max_fee_per_gas.to_proto().into(), + max_priority_fee_per_gas: tx.max_priority_fee_per_gas.to_proto().into(), + max_fee_per_blob_gas: tx.max_fee_per_blob_gas.to_proto().into(), + input: tx.input.to_vec(), + signature: self.signature().to_proto().into(), + chain_id: tx.chain_id.into(), + access_list: tx.access_list.iter().map(ModelExt::to_proto).collect(), + blob_versioned_hashes: tx + .blob_versioned_hashes + .iter() + .map(ModelExt::to_proto) + .collect(), + }) + } +} + +impl ModelExt for models::Signature { + type Proto = beaconchain::Signature; + + fn to_proto(&self) -> Self::Proto { + beaconchain::Signature { + r: self.r().to_proto().into(), + s: self.s().to_proto().into(), + } + } +} + +impl ModelExt for models::AccessListItem { + type Proto = beaconchain::AccessListItem; + + fn to_proto(&self) -> Self::Proto { + beaconchain::AccessListItem { + address: self.address.to_proto().into(), + storage_keys: self.storage_keys.iter().map(ModelExt::to_proto).collect(), + } + } +} + +impl ModelExt for models::Validator { + type Proto = beaconchain::Validator; + + fn to_proto(&self) -> Self::Proto { + beaconchain::Validator { + filter_ids: Vec::default(), + validator_index: self.index, + balance: self.balance, + status: self.status.to_proto() as i32, + pubkey: self.validator.pubkey.to_proto().into(), + withdrawal_credentials: self.validator.withdrawal_credentials.to_proto().into(), + effective_balance: self.validator.effective_balance, + slashed: self.validator.slashed, + activation_eligibility_epoch: self.validator.activation_eligibility_epoch, + activation_epoch: self.validator.activation_epoch, + exit_epoch: self.validator.exit_epoch, + withdrawable_epoch: self.validator.withdrawable_epoch, + } + } +} + +impl ModelExt for 
models::BlobSidecar { + type Proto = beaconchain::Blob; + + fn to_proto(&self) -> Self::Proto { + beaconchain::Blob { + filter_ids: Vec::default(), + blob_index: self.index, + blob: self.blob.to_vec(), + kzg_commitment: self.kzg_commitment.to_proto().into(), + kzg_proof: self.kzg_proof.to_proto().into(), + kzg_commitment_inclusion_proof: self + .kzg_commitment_inclusion_proof + .iter() + .map(ModelExt::to_proto) + .collect(), + blob_hash: self.hash().to_proto().into(), + transaction_index: u32::MAX, + transaction_hash: None, + } + } +} + +impl ModelExt for models::ValidatorStatus { + type Proto = beaconchain::ValidatorStatus; + + fn to_proto(&self) -> Self::Proto { + match self { + models::ValidatorStatus::PendingInitialized => { + beaconchain::ValidatorStatus::PendingInitialized + } + models::ValidatorStatus::PendingQueued => beaconchain::ValidatorStatus::PendingQueued, + models::ValidatorStatus::ActiveOngoing => beaconchain::ValidatorStatus::ActiveOngoing, + &models::ValidatorStatus::ActiveExiting => beaconchain::ValidatorStatus::ActiveExiting, + models::ValidatorStatus::ActiveSlashed => beaconchain::ValidatorStatus::ActiveSlashed, + models::ValidatorStatus::ExitedUnslashed => { + beaconchain::ValidatorStatus::ExitedUnslashed + } + models::ValidatorStatus::ExitedSlashed => beaconchain::ValidatorStatus::ExitedSlashed, + models::ValidatorStatus::WithdrawalPossible => { + beaconchain::ValidatorStatus::WithdrawalPossible + } + models::ValidatorStatus::WithdrawalDone => beaconchain::ValidatorStatus::WithdrawalDone, + } + } +} + +impl ModelExt for models::B256 { + type Proto = beaconchain::B256; + + fn to_proto(&self) -> Self::Proto { + beaconchain::B256::from_bytes(&self.0) + } +} + +impl ModelExt for models::U256 { + type Proto = beaconchain::U256; + + fn to_proto(&self) -> Self::Proto { + beaconchain::U256::from_bytes(&self.to_be_bytes()) + } +} + +impl ModelExt for u128 { + type Proto = beaconchain::U128; + + fn to_proto(&self) -> Self::Proto { + 
beaconchain::U128::from_bytes(&self.to_be_bytes()) + } +} + +impl ModelExt for models::B384 { + type Proto = beaconchain::B384; + + fn to_proto(&self) -> Self::Proto { + beaconchain::B384::from_bytes(&self.to_be_bytes()) + } +} + +impl ModelExt for models::Address { + type Proto = beaconchain::Address; + + fn to_proto(&self) -> Self::Proto { + beaconchain::Address::from_bytes(&self.0) + } +} + +impl TxKindExt for models::TxKind { + fn to_option(self) -> Option { + match self { + models::TxKind::Create => None, + models::TxKind::Call(to) => to.to_proto().into(), + } + } +} diff --git a/beaconchain/src/provider/http.rs b/beaconchain/src/provider/http.rs new file mode 100644 index 00000000..64ba0efc --- /dev/null +++ b/beaconchain/src/provider/http.rs @@ -0,0 +1,308 @@ +use std::{fmt::Debug, time::Duration}; + +use error_stack::{Report, Result, ResultExt}; +use reqwest::{ + header::{HeaderMap, HeaderValue}, + Client, +}; + +use crate::provider::models; + +#[derive(Debug)] +pub enum BeaconApiError { + Request, + NotFound, + DeserializeResponse, + Timeout, + Unauthorized, + ServerError, +} + +/// Block identifier. +#[derive(Debug, Clone)] +pub enum BlockId { + /// Current head block. + Head, + /// Most recent finalized block. + Finalized, + /// Block by slot. + Slot(u64), + /// Block by root. + BlockRoot(models::B256), +} + +#[derive(Clone)] +pub struct BeaconApiProvider { + client: Client, + url: String, + options: BeaconApiProviderOptions, +} + +#[derive(Debug, Clone)] +pub struct BeaconApiProviderOptions { + /// Timeout for normal requests. + pub timeout: Duration, + /// Timeout for validators requests. + pub validators_timeout: Duration, + /// Headers to send with the requests. 
+ pub headers: HeaderMap, +} + +impl BeaconApiProvider { + pub fn new(url: impl Into, options: BeaconApiProviderOptions) -> Self { + let url = url.into().trim_end_matches('/').to_string(); + Self { + client: Client::new(), + url, + options, + } + } + + pub async fn get_header( + &self, + block_id: BlockId, + ) -> Result { + let request = HeaderRequest::new(block_id); + self.send_request(request, self.options.timeout).await + } + + pub async fn get_block( + &self, + block_id: BlockId, + ) -> Result { + let request = BlockRequest::new(block_id); + self.send_request(request, self.options.timeout).await + } + + pub async fn get_blob_sidecar( + &self, + block_id: BlockId, + ) -> Result { + let request = BlobSidecarRequest::new(block_id); + self.send_request(request, self.options.timeout).await + } + + pub async fn get_validators( + &self, + block_id: BlockId, + ) -> Result { + let request = ValidatorsRequest::new(block_id); + self.send_request(request, self.options.validators_timeout) + .await + } + + pub async fn get_block_root( + &self, + block_id: BlockId, + ) -> Result { + let request = BlockRootRequest::new(block_id); + self.send_request(request, self.options.timeout).await + } + + /// Send a request to the beacon node. + /// + /// TODO: this function can be turned into a `Transport` trait if we ever need it. 
+ #[tracing::instrument(level = "debug", skip(self))] + async fn send_request( + &self, + request: Req, + timeout: Duration, + ) -> Result + where + Req: BeaconApiRequest + Debug, + { + let url = format!("{}{}", self.url, request.path()); + let response = match self + .client + .get(&url) + .header("Content-Type", "application/json") + .headers(self.options.headers.clone()) + .timeout(timeout) + .send() + .await + { + Ok(response) => response, + Err(err) if err.is_timeout() => { + return Err(err).change_context(BeaconApiError::Timeout); + } + Err(err) => { + return Err(err).change_context(BeaconApiError::Timeout); + } + }; + + if response.status().as_u16() == 404 { + return Err(BeaconApiError::NotFound.into()); + } + + if response.status().as_u16() == 401 { + return Err(BeaconApiError::Unauthorized.into()); + } + + if response.status().as_u16() != 200 { + return Err(BeaconApiError::ServerError.into()); + } + + let text_response = response + .text() + .await + .change_context(BeaconApiError::Request)?; + + let response = serde_json::from_str(&text_response) + .change_context(BeaconApiError::DeserializeResponse)?; + + Ok(response) + } +} + +pub trait BeaconApiRequest { + type Response: serde::de::DeserializeOwned; + + fn path(&self) -> String; +} + +#[derive(Debug)] +pub struct HeaderRequest { + block_id: BlockId, +} + +#[derive(Debug)] +pub struct BlockRequest { + block_id: BlockId, +} + +#[derive(Debug)] +pub struct BlobSidecarRequest { + block_id: BlockId, +} + +#[derive(Debug)] +pub struct ValidatorsRequest { + block_id: BlockId, +} + +#[derive(Debug)] +pub struct BlockRootRequest { + block_id: BlockId, +} + +impl std::fmt::Display for BlockId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BlockId::Head => write!(f, "head"), + BlockId::Finalized => write!(f, "finalized"), + BlockId::Slot(slot) => write!(f, "{}", slot), + BlockId::BlockRoot(root) => write!(f, "{}", root), + } + } +} + +pub trait BeaconApiErrorExt { + fn 
is_not_found(&self) -> bool; +} + +impl BeaconApiError { + pub fn is_not_found(&self) -> bool { + matches!(self, BeaconApiError::NotFound) + } +} + +impl BeaconApiErrorExt for Report { + fn is_not_found(&self) -> bool { + self.current_context().is_not_found() + } +} + +impl std::fmt::Display for BeaconApiError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BeaconApiError::Request => write!(f, "failed to send request"), + BeaconApiError::DeserializeResponse => write!(f, "failed to deserialize response"), + BeaconApiError::NotFound => write!(f, "not found"), + BeaconApiError::Timeout => write!(f, "the request timed out"), + BeaconApiError::Unauthorized => write!(f, "unauthorized"), + BeaconApiError::ServerError => write!(f, "server error"), + } + } +} + +impl error_stack::Context for BeaconApiError {} + +impl HeaderRequest { + pub fn new(block_id: BlockId) -> Self { + Self { block_id } + } +} + +impl BeaconApiRequest for HeaderRequest { + type Response = models::HeaderResponse; + + fn path(&self) -> String { + format!("/eth/v1/beacon/headers/{}", self.block_id) + } +} + +impl BlockRequest { + pub fn new(block_id: BlockId) -> Self { + Self { block_id } + } +} + +impl BeaconApiRequest for BlockRequest { + type Response = models::BeaconBlockResponse; + + fn path(&self) -> String { + format!("/eth/v2/beacon/blocks/{}", self.block_id) + } +} + +impl BlobSidecarRequest { + pub fn new(block_id: BlockId) -> Self { + Self { block_id } + } +} + +impl BeaconApiRequest for BlobSidecarRequest { + type Response = models::BlobSidecarResponse; + + fn path(&self) -> String { + format!("/eth/v1/beacon/blob_sidecars/{}", self.block_id) + } +} + +impl ValidatorsRequest { + pub fn new(block_id: BlockId) -> Self { + Self { block_id } + } +} + +impl BeaconApiRequest for ValidatorsRequest { + type Response = models::ValidatorsResponse; + + fn path(&self) -> String { + format!("/eth/v1/beacon/states/{}/validators", self.block_id) + } +} + +impl 
BlockRootRequest { + pub fn new(block_id: BlockId) -> Self { + Self { block_id } + } +} + +impl BeaconApiRequest for BlockRootRequest { + type Response = models::BlockRootResponse; + + fn path(&self) -> String { + format!("/eth/v1/beacon/blocks/{}/root", self.block_id) + } +} + +impl Default for BeaconApiProviderOptions { + fn default() -> Self { + Self { + timeout: Duration::from_secs(5), + validators_timeout: Duration::from_secs(60), + headers: HeaderMap::default(), + } + } +} diff --git a/beaconchain/src/provider/mod.rs b/beaconchain/src/provider/mod.rs new file mode 100644 index 00000000..7ebdc23d --- /dev/null +++ b/beaconchain/src/provider/mod.rs @@ -0,0 +1,3 @@ +pub mod http; +pub mod models; +pub mod utils; diff --git a/beaconchain/src/provider/models.rs b/beaconchain/src/provider/models.rs new file mode 100644 index 00000000..aa0f3906 --- /dev/null +++ b/beaconchain/src/provider/models.rs @@ -0,0 +1,188 @@ +use apibara_dna_common::{Cursor, GetCursor, Hash}; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DefaultOnNull, DisplayFromStr}; + +pub use alloy_consensus::{ + Signed, TxEip1559, TxEip2930, TxEip4844, TxEip4844Variant, TxEip4844WithSidecar, TxEnvelope, + TxLegacy, TxType, +}; +pub use alloy_eips::eip2930::AccessListItem; +pub use alloy_primitives::{ruint::aliases::B384, Address, Bytes, Signature, TxKind, B256, U256}; +pub use alloy_rpc_types_beacon::header::HeaderResponse; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct BlockRootResponse { + pub data: BlockRoot, + pub finalized: bool, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct BlockRoot { + pub root: B256, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct BeaconBlockResponse { + pub finalized: bool, + pub data: BeaconBlockData, +} + +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct BeaconBlockData { + pub message: BeaconBlock, + pub signature: 
Bytes, +} + +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct BeaconBlock { + #[serde_as(as = "DisplayFromStr")] + pub slot: u64, + #[serde_as(as = "DisplayFromStr")] + pub proposer_index: u32, + pub parent_root: B256, + pub state_root: B256, + pub body: BeaconBlockBody, +} + +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct BeaconBlockBody { + pub randao_reveal: Bytes, + pub eth1_data: Eth1Data, + pub graffiti: B256, + pub execution_payload: Option, + #[serde(default)] + pub blob_kzg_commitments: Vec, +} + +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Eth1Data { + #[serde_as(as = "DisplayFromStr")] + pub deposit_count: u64, + pub deposit_root: B256, + pub block_hash: B256, +} + +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ExecutionPayload { + pub parent_hash: B256, + pub fee_recipient: Address, + pub state_root: B256, + pub receipts_root: B256, + pub logs_bloom: Bytes, + pub prev_randao: B256, + #[serde_as(as = "DisplayFromStr")] + pub block_number: u64, + #[serde_as(as = "DisplayFromStr")] + pub timestamp: u64, + #[serde_as(deserialize_as = "DefaultOnNull")] + #[serde(default)] + pub transactions: Vec, + #[serde_as(deserialize_as = "DefaultOnNull")] + #[serde(default)] + pub withdrawals: Vec, +} + +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Withdrawal { + #[serde_as(as = "DisplayFromStr")] + pub index: u64, + #[serde_as(as = "DisplayFromStr")] + pub validator_index: u32, + pub address: Address, + #[serde_as(as = "DisplayFromStr")] + pub amount: u64, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct BlobSidecarResponse { + pub data: Vec, +} + +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct BlobSidecar { + #[serde_as(as = "DisplayFromStr")] + pub index: u32, + pub blob: Bytes, + pub 
kzg_commitment: B384, + pub kzg_proof: B384, + pub kzg_commitment_inclusion_proof: Vec, +} + +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ValidatorInfo { + pub pubkey: B384, + pub withdrawal_credentials: B256, + #[serde_as(as = "DisplayFromStr")] + pub effective_balance: u64, + pub slashed: bool, + #[serde_as(as = "DisplayFromStr")] + pub activation_eligibility_epoch: u64, + #[serde_as(as = "DisplayFromStr")] + pub activation_epoch: u64, + #[serde_as(as = "DisplayFromStr")] + pub exit_epoch: u64, + #[serde_as(as = "DisplayFromStr")] + pub withdrawable_epoch: u64, +} + +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Validator { + #[serde_as(as = "DisplayFromStr")] + pub index: u32, + #[serde_as(as = "DisplayFromStr")] + pub balance: u64, + pub validator: ValidatorInfo, + pub status: ValidatorStatus, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, Hash)] +#[serde(rename_all = "snake_case")] +pub enum ValidatorStatus { + PendingInitialized = 0, + PendingQueued = 1, + ActiveOngoing = 2, + ActiveExiting = 3, + ActiveSlashed = 4, + ExitedUnslashed = 5, + ExitedSlashed = 6, + WithdrawalPossible = 7, + WithdrawalDone = 8, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ValidatorsResponse { + pub data: Vec, +} + +impl BlobSidecar { + pub fn hash(&self) -> B256 { + super::utils::kzg_commitment_to_versioned_hash(&self.kzg_commitment) + } +} + +impl GetCursor for BeaconBlock { + fn cursor(&self) -> Option { + let hash = Hash(self.state_root.0.to_vec()); + Some(Cursor::new(self.slot, hash)) + } +} + +pub trait BeaconCursorExt { + fn cursor(&self) -> Cursor; +} + +impl BeaconCursorExt for HeaderResponse { + fn cursor(&self) -> Cursor { + let hash = Hash(self.data.root.0.to_vec()); + Cursor::new(self.data.header.message.slot, hash) + } +} diff --git a/beaconchain/src/provider/utils.rs b/beaconchain/src/provider/utils.rs 
new file mode 100644 index 00000000..e2b54eb5 --- /dev/null +++ b/beaconchain/src/provider/utils.rs @@ -0,0 +1,28 @@ +use crate::provider::models::{B256, B384}; + +pub fn kzg_commitment_to_versioned_hash(commitment: &B384) -> B256 { + alloy_eips::eip4844::kzg_to_versioned_hash(&commitment.to_be_bytes::<48>()) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use crate::provider::models::{B256, B384}; + + #[test] + pub fn test_kzg_commitment_to_versioned_hash() { + // commitment - hash + let test_cases = [ + ("8a2461b2ad767d96d11fe783fc63023fcde21d8dd03064056fa522ebbfae185ec1f82025b627977603b014ec64c5ee19", "0x011a3bb3c2a1d4bf04e4501628ba351bd2a5eb8971daf6d6d47ca5a79d8589bb"), + ("9007ff0d9ca54b8fe0b25ae5bdb8fa2ee30249f88c4da33a6a8d8ab09828c1100353a0f6dd0f97dfc493ac942462e2e0", "0x012ba1b06de5dfa8cf48db8e1b4934b6f4011c5ca31afeffd4c990e3b45464c5") + ]; + + for (commitment, expected) in test_cases { + let commitment = B384::from_str_radix(commitment, 16).unwrap(); + let hash = super::kzg_commitment_to_versioned_hash(&commitment); + let expected = B256::from_str(expected).unwrap(); + assert_eq!(hash, expected); + } + } +} diff --git a/benchmark/Cargo.toml b/benchmark/Cargo.toml new file mode 100644 index 00000000..b828f86e --- /dev/null +++ b/benchmark/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "apibara-benchmark" +version = "0.0.0" +edition.workspace = true +authors.workspace = true +repository.workspace = true +license.workspace = true + +[lib] +name = "apibara_benchmark" +path = "src/lib.rs" + +[[bin]] +name = "apibara-benchmark" +path = "src/bin.rs" + +[dependencies] +apibara-observability = { path = "../observability" } +apibara-dna-common = { path = "../common" } +apibara-dna-protocol = { path = "../protocol" } +byte-unit.workspace = true +clap.workspace = true +ctrlc.workspace = true +futures.workspace = true +error-stack.workspace = true +hex.workspace = true +prost.workspace = true +tonic.workspace = true +tokio.workspace = true +tokio-stream = 
{ version = "0.1.15", features = ["sync", "net"] } +tokio-util.workspace = true +tracing.workspace = true diff --git a/benchmark/src/bin.rs b/benchmark/src/bin.rs new file mode 100644 index 00000000..216986a8 --- /dev/null +++ b/benchmark/src/bin.rs @@ -0,0 +1,32 @@ +use apibara_benchmark::{BenchmarkError, Cli}; +use apibara_observability::init_opentelemetry; +use clap::Parser; +use error_stack::{Result, ResultExt}; +use tokio_util::sync::CancellationToken; +use tracing::info; + +#[tokio::main] +async fn main() -> Result<(), BenchmarkError> { + let args = Cli::parse(); + run_with_args(args).await +} + +async fn run_with_args(args: Cli) -> Result<(), BenchmarkError> { + init_opentelemetry(env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")) + .change_context(BenchmarkError) + .attach_printable("failed to initialize opentelemetry")?; + + let ct = CancellationToken::new(); + + ctrlc::set_handler({ + let ct = ct.clone(); + move || { + info!("SIGINT received"); + ct.cancel(); + } + }) + .change_context(BenchmarkError) + .attach_printable("failed to set SIGINT handler")?; + + args.run(ct).await +} diff --git a/benchmark/src/lib.rs b/benchmark/src/lib.rs new file mode 100644 index 00000000..407506dd --- /dev/null +++ b/benchmark/src/lib.rs @@ -0,0 +1,347 @@ +use std::time::{Duration, Instant}; + +use apibara_dna_protocol::{ + dna::stream::{dna_stream_client::DnaStreamClient, Cursor, StreamDataRequest}, + evm, starknet, +}; +use byte_unit::Byte; +use clap::{Args, Parser, Subcommand}; +use error_stack::{Result, ResultExt}; +use futures::{StreamExt, TryStreamExt}; +use prost::Message; +use tokio::task::JoinSet; +use tokio_util::sync::CancellationToken; +use tracing::info; + +#[derive(Debug)] +pub struct BenchmarkError; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +pub struct Cli { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +pub enum Command { + /// Benchmark the EVM DNA stream. 
+ Evm(CommonArgs), + /// Benchmark the Starknet DNA stream. + Starknet(CommonArgs), +} + +#[derive(Args, Debug, Clone)] +pub struct CommonArgs { + /// Hex-encoded filter. + #[clap(long, default_value = "0x")] + pub filter: String, + /// Stream URL. + #[clap(long, default_value = "http://localhost:7007")] + pub stream_url: String, + /// Start streaming from this block. + #[clap(long)] + pub starting_block: Option, + /// Stop streaming at this block. + #[clap(long)] + pub ending_block: Option, + #[clap(long, default_value = "1")] + pub concurrency: usize, +} + +impl Cli { + pub async fn run(self, ct: CancellationToken) -> Result<(), BenchmarkError> { + match self.command { + Command::Evm(args) => run_benchmark::(args, ct).await, + Command::Starknet(args) => { + run_benchmark::(args, ct).await + } + } + } +} + +async fn run_benchmark(args: CommonArgs, ct: CancellationToken) -> Result<(), BenchmarkError> +where + F: Message + Clone + Default + Send + 'static, + S: Stats + Send + 'static, +{ + let bytes = hex::decode(&args.filter) + .change_context(BenchmarkError) + .attach_printable("failed to filter hex string")?; + + let filter = ::decode(bytes.as_slice()) + .change_context(BenchmarkError) + .attach_printable("failed to decode filter")?; + + let mut tasks = JoinSet::new(); + for i in 0..args.concurrency { + tasks.spawn(run_benchmark_single::( + i, + args.clone(), + filter.clone(), + ct.clone(), + )); + } + + while let Some(result) = tasks.join_next().await { + result.change_context(BenchmarkError)??; + } + + Ok(()) +} + +async fn run_benchmark_single( + index: usize, + args: CommonArgs, + filter: F, + ct: CancellationToken, +) -> Result<(), BenchmarkError> +where + F: Message + Default + Send, + S: Stats + Send, +{ + let mut client = DnaStreamClient::connect(args.stream_url.clone()) + .await + .change_context(BenchmarkError)?; + + let starting_cursor = args.starting_block.map(|block| Cursor { + order_key: block, + unique_key: Vec::new(), + }); + + let stream = client 
+ .stream_data(StreamDataRequest { + filter: vec![filter.encode_to_vec()], + starting_cursor, + ..Default::default() + }) + .await + .change_context(BenchmarkError)? + .into_inner() + .take_until(async move { ct.cancelled().await }); + + tokio::pin!(stream); + + let mut stats = S::new(index); + + let mut last_print = Instant::now(); + let print_interval = Duration::from_secs(10); + + while let Some(message) = stream.try_next().await.change_context(BenchmarkError)? { + use apibara_dna_protocol::dna::stream::stream_data_response::Message as ProtoMessage; + if let Some(ProtoMessage::Data(data_message)) = message.message { + let block_number = data_message + .cursor + .as_ref() + .map(|c| c.order_key) + .unwrap_or_default(); + + if let Some(block_data) = data_message.data.first() { + let block = S::Block::decode(block_data.as_ref()) + .change_context(BenchmarkError) + .attach_printable("failed to decode block")?; + stats.record(block); + + if last_print.elapsed() > print_interval { + last_print = Instant::now(); + stats.print_summary(); + } + } + + if let Some(end_block) = args.ending_block { + if block_number >= end_block { + info!(block_number, "reached ending block"); + break; + } + } + } + } + + stats.print_summary(); + + Ok(()) +} + +trait Stats { + type Block: Message + Default; + fn new(index: usize) -> Self; + fn record(&mut self, item: Self::Block); + fn print_summary(&self); +} + +struct EvmStats { + pub index: usize, + pub block_number: u64, + pub start: Instant, + pub bytes: u64, + pub blocks: u64, + pub transactions: u64, + pub receipts: u64, + pub logs: u64, + pub withdrawals: u64, +} + +impl Stats for EvmStats { + type Block = evm::Block; + + fn new(index: usize) -> Self { + Self { + index, + start: Instant::now(), + block_number: 0, + blocks: 0, + bytes: 0, + transactions: 0, + receipts: 0, + logs: 0, + withdrawals: 0, + } + } + + fn record(&mut self, block: evm::Block) { + self.block_number = block + .header + .as_ref() + .map(|h| h.block_number) + 
.unwrap_or_default(); + self.blocks += 1; + self.bytes += block.encoded_len() as u64; + + self.transactions += block.transactions.len() as u64; + self.receipts += block.receipts.len() as u64; + self.logs += block.logs.len() as u64; + self.withdrawals += block.withdrawals.len() as u64; + } + + fn print_summary(&self) { + let elapsed = self.start.elapsed(); + + let elapsed_sec = elapsed.as_secs_f64(); + let bytes = Byte::from_u64(self.bytes); + + info!( + latest_block = %self.block_number, + blocks = %self.blocks, + bytes = format!("{:#.6}", bytes), + transactions = %self.transactions, + receipts = %self.receipts, + logs = %self.logs, + withdrawals = %self.withdrawals, + elapsed = ?elapsed, + "[{}] evm stats (count)", + self.index, + ); + + let block_rate = self.blocks as f64 / elapsed_sec; + let byte_rate = Byte::from_f64(self.bytes as f64 / elapsed_sec).unwrap_or_default(); + let transaction_rate = self.transactions as f64 / elapsed_sec; + let receipt_rate = self.receipts as f64 / elapsed_sec; + let log_rate = self.logs as f64 / elapsed_sec; + let withdrawal_rate = self.withdrawals as f64 / elapsed_sec; + + info!( + blocks = %block_rate, + bytes = format!("{:#.6}/s", byte_rate), + transactions = %transaction_rate, + receipts = %receipt_rate, + logs = %log_rate, + withdrawals = %withdrawal_rate, + elapsed = ?elapsed, + "[{}] evm stats (rate)", + self.index, + ); + } +} + +struct StarknetStats { + pub index: usize, + pub start: Instant, + pub block_number: u64, + pub blocks: u64, + pub bytes: u64, + pub transactions: u64, + pub receipts: u64, + pub events: u64, + pub messages: u64, +} + +impl Stats for StarknetStats { + type Block = starknet::Block; + + fn new(index: usize) -> Self { + Self { + index, + start: Instant::now(), + block_number: 0, + blocks: 0, + bytes: 0, + transactions: 0, + receipts: 0, + events: 0, + messages: 0, + } + } + + fn record(&mut self, block: starknet::Block) { + self.block_number = block + .header + .as_ref() + .map(|h| h.block_number) + 
.unwrap_or_default(); + self.blocks += 1; + self.bytes += block.encoded_len() as u64; + + self.transactions += block.transactions.len() as u64; + self.receipts += block.receipts.len() as u64; + self.events += block.events.len() as u64; + self.messages += block.messages.len() as u64; + } + + fn print_summary(&self) { + let elapsed = self.start.elapsed(); + + let elapsed_sec = elapsed.as_secs_f64(); + let bytes = Byte::from_u64(self.bytes); + + info!( + latest_block = %self.block_number, + blocks = %self.blocks, + bytes = format!("{:#.6}", bytes), + transactions = %self.transactions, + receipts = %self.receipts, + logs = %self.events, + withdrawals = %self.messages, + elapsed = ?elapsed, + "[{}] starknet stats (count)", + self.index + ); + + let block_rate = self.blocks as f64 / elapsed_sec; + let byte_rate = Byte::from_f64(self.bytes as f64 / elapsed_sec).unwrap_or_default(); + let transaction_rate = self.transactions as f64 / elapsed_sec; + let receipt_rate = self.receipts as f64 / elapsed_sec; + let event_rate = self.events as f64 / elapsed_sec; + let message_rate = self.messages as f64 / elapsed_sec; + + info!( + index = self.index, + blocks = %block_rate, + bytes = format!("{:#.6}/s", byte_rate), + transactions = %transaction_rate, + receipts = %receipt_rate, + events = %event_rate, + messages = %message_rate, + elapsed = ?elapsed, + "[{}] starknet stats (rate)", + self.index + ); + } +} + +impl error_stack::Context for BenchmarkError {} + +impl std::fmt::Display for BenchmarkError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "benchmark error") + } +} diff --git a/charts/operator/.helmignore b/charts/operator/.helmignore deleted file mode 100644 index 0e8a0eb3..00000000 --- a/charts/operator/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/charts/operator/Chart.yaml b/charts/operator/Chart.yaml deleted file mode 100644 index 639e4f60..00000000 --- a/charts/operator/Chart.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v2 -name: operator -description: The Apibara operator for Kubernetes -type: application -version: 0.1.0 -appVersion: "0.2.2" -home: https://www.apibara.com -sources: - - https://github.com/apibara/dna/tree/main/charts/operator diff --git a/charts/operator/README.md b/charts/operator/README.md deleted file mode 100644 index 585007a8..00000000 --- a/charts/operator/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# Apibara Operator - -The Apibara Operator is a Kubernetes operator to deploy indexers. - -## Usage - -The chart is published to an OCI Helm repository. - -Inspect the chart with the `helm show` command: - -``` -$ helm show readme oci://quay.io/apibara-charts/operator ---- -Pulled: quay.io/apibara-charts/operator:0.1.0 -Digest: sha256:a248767bcfbb2973b616052dcc38b791f1b6ff13f2db40b61951183f85c0729e -# Apibara Operator - -The Apibara Operator is a Kubernetes operator to deploy indexers. -``` - -Install the chart with `helm install`. - -``` -$ helm install capy oci://quay.io/apibara-charts/operator ---- -Pulled: quay.io/apibara-charts/operator:0.1.0 -Digest: sha256:a248767bcfbb2973b616052dcc38b791f1b6ff13f2db40b61951183f85c0729e -NAME: capy -LAST DEPLOYED: Fri May 10 21:00:30 2024 -NAMESPACE: default -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: -capy-operator has been installed. Check its status by running: - - kubectl --namespace default get pods - -``` - -Customize the release using `values.yaml` as usual. 
diff --git a/charts/operator/crds/custom-resource-definitions.yaml b/charts/operator/crds/custom-resource-definitions.yaml deleted file mode 100644 index bf826f3c..00000000 --- a/charts/operator/crds/custom-resource-definitions.yaml +++ /dev/null @@ -1,1373 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: indexers.apibara.com -spec: - group: apibara.com - names: - categories: [] - kind: Indexer - plural: indexers - shortNames: - - indexer - singular: indexer - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .status.phase - name: Status - type: string - - jsonPath: .status.instanceName - name: Instance - type: string - - jsonPath: .status.restartCount - name: Restarts - type: number - name: v1alpha2 - schema: - openAPIV3Schema: - description: Auto-generated derived type for IndexerSpec via `CustomResource` - properties: - spec: - description: Run an indexer. - properties: - env: - description: List of environment variables to set in the indexer container. - items: - description: EnvVar represents an environment variable present in a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. Cannot be used if value is not empty. 
- properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - optional: - description: Specify whether the ConfigMap or its key must be defined - type: boolean - required: - - key - type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified API version. - type: string - required: - - fieldPath - type: object - resourceFieldRef: - description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, optional for env vars' - type: string - divisor: - description: Specifies the output format of the exposed resources, defaults to "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - secretKeyRef: - description: Selects a key of a secret in the pod's namespace - properties: - key: - description: The key of the secret to select from. Must be a valid secret key. - type: string - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - nullable: true - type: array - sink: - anyOf: - - required: - - type - - required: - - image - description: Sink to run. - properties: - args: - description: Arguments passed to the sink. - items: - type: string - nullable: true - type: array - image: - type: string - script: - description: Path to the script to run. - type: string - type: - type: string - required: - - script - type: object - source: - description: Indexer source code. - oneOf: - - required: - - gitHub - - required: - - volume - properties: - gitHub: - description: Clone the indexer repository from GitHub. - properties: - accessTokenEnvVar: - description: Environment variable containing the GitHub access token. - nullable: true - type: string - gitCleanFlags: - description: Additional flags to pass to `git clean`. - nullable: true - type: string - gitCloneFlags: - description: Additional flags to pass to `git clone`. - nullable: true - type: string - owner: - description: GitHub repository owner, e.g. `my-org`. - type: string - repo: - description: GitHub repository name, e.g. `my-indexer`. - type: string - revision: - description: Git revision, e.g. `main` or `a746ab`. - type: string - subpath: - description: Run the indexer from the specified subpath of the repository, e.g. `/packages/indexer`. - nullable: true - type: string - required: - - owner - - repo - - revision - type: object - volume: - description: Use source code from a mounted volume. - properties: - path: - description: |- - Path to the indexer source code, e.g. `/myvolume`. - - Use this option with the `volumes` field to mount a volume containing the indexer source code. 
- type: string - required: - - path - type: object - type: object - volumes: - description: List of volumes that can be mounted by containers belonging to the indexer. - items: - properties: - volume: - description: Volume to mount. - properties: - awsElasticBlockStore: - description: 'awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - properties: - fsType: - description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: string - partition: - description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).' - format: int32 - type: integer - readOnly: - description: 'readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: boolean - volumeID: - description: 'volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: string - required: - - volumeID - type: object - azureDisk: - description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - properties: - cachingMode: - description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.' 
- type: string - diskName: - description: diskName is the Name of the data disk in the blob storage - type: string - diskURI: - description: diskURI is the URI of data disk in the blob storage - type: string - fsType: - description: fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - kind: - description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' - type: string - readOnly: - description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: azureFile represents an Azure File Service mount on the host and bind mount to the pod. - properties: - readOnly: - description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. - type: boolean - secretName: - description: secretName is the name of secret that contains Azure Storage Account Name and Key - type: string - shareName: - description: shareName is the azure share Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime - properties: - monitors: - description: 'monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - items: - type: string - type: array - path: - description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /' - type: string - readOnly: - description: 'readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: boolean - secretFile: - description: 'secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - secretRef: - description: 'secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - type: object - user: - description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - required: - - monitors - type: object - cinder: - description: 'cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - properties: - fsType: - description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - readOnly: - description: 'readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: boolean - secretRef: - description: 'secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.' - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - type: object - volumeID: - description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - required: - - volumeID - type: object - configMap: - description: configMap represents a configMap that should populate this volume - properties: - defaultMode: - description: 'defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' - format: int32 - type: integer - items: - description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - optional: - description: optional specify whether the ConfigMap or its keys must be defined - type: boolean - type: object - csi: - description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). - properties: - driver: - description: driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. - type: string - fsType: - description: fsType to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. - type: string - nodePublishSecretRef: - description: nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - type: object - readOnly: - description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). 
- type: boolean - volumeAttributes: - additionalProperties: - type: string - description: volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. - type: object - required: - - driver - type: object - downwardAPI: - description: downwardAPI represents downward API about the pod that should populate this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' - format: int32 - type: integer - items: - description: Items is a list of downward API volume file - items: - description: DownwardAPIVolumeFile represents information to create the file containing the pod field - properties: - fieldRef: - description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, optional for env vars' - type: string - divisor: - description: Specifies the output format of the exposed resources, defaults to "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - emptyDir: - description: 'emptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - properties: - medium: - description: 'medium represents what type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. 
More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string - type: object - ephemeral: - description: |- - ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. - - Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity - tracking are needed, - c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through - a PersistentVolumeClaim (see EphemeralVolumeSource for more - information on the connection between this volume type - and PersistentVolumeClaim). - - Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. - - Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. - - A pod can use both types of ephemeral volumes and persistent volumes at the same time. - properties: - volumeClaimTemplate: - description: |- - Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). - - An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. 
Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. - - This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. - - Required, must not be nil. - properties: - metadata: - description: May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - creationTimestamp: - description: |- - CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. - - Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - format: date-time - type: string - deletionGracePeriodSeconds: - description: Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. - format: int64 - type: integer - deletionTimestamp: - description: |- - DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. 
As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. - - Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - format: date-time - type: string - finalizers: - description: Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list. 
- items: - type: string - type: array - generateName: - description: |- - GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. - - If this field is specified and the generated name exists, the server will return a 409. - - Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency - type: string - generation: - description: A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. - format: int64 - type: integer - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' - type: object - managedFields: - description: ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like "ci-cd". The set of fields is always in the version that the workflow used when modifying the object. - items: - description: ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to. - properties: - apiVersion: - description: APIVersion defines the version of this resource that this field set applies to. 
The format is "group/version" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted. - type: string - fieldsType: - description: 'FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: "FieldsV1"' - type: string - fieldsV1: - description: FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. - type: object - manager: - description: Manager is an identifier of the workflow managing these fields. - type: string - operation: - description: Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'. - type: string - subresource: - description: Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource. - type: string - time: - description: Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over. - format: date-time - type: string - type: object - type: array - name: - description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - namespace: - description: |- - Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. - - Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces - type: string - ownerReferences: - description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. - items: - description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. - properties: - apiVersion: - description: API version of the referent. - type: string - blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. - type: boolean - controller: - description: If true, this reference points to the managing controller. - type: boolean - kind: - description: 'Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - required: - - apiVersion - - kind - - name - - uid - type: object - type: array - resourceVersion: - description: |- - An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. - - Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - selfLink: - description: 'Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.' - type: string - uid: - description: |- - UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. - - Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids - type: string - type: object - spec: - description: The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. - properties: - accessModes: - description: 'accessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. 
For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. - * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - namespace: - description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: 'resources represents the minimum resources the volume should have. 
If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - type: string - required: - - name - type: object - type: array - limits: - additionalProperties: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." - type: string - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . 
::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." - type: string - description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - selector: - description: selector is a label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. 
- type: string - volumeName: - description: volumeName is the binding reference to the PersistentVolume backing this claim. - type: string - type: object - required: - - spec - type: object - type: object - fc: - description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - properties: - fsType: - description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - lun: - description: 'lun is Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' - type: boolean - targetWWNs: - description: 'targetWWNs is Optional: FC target worldwide names (WWNs)' - items: - type: string - type: array - wwids: - description: 'wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' - items: - type: string - type: array - type: object - flexVolume: - description: flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. - properties: - driver: - description: driver is the name of the driver to use for this volume. - type: string - fsType: - description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - type: string - options: - additionalProperties: - type: string - description: 'options is Optional: this field holds extra command options if any.' - type: object - readOnly: - description: 'readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' 
- type: boolean - secretRef: - description: 'secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - type: object - required: - - driver - type: object - flocker: - description: flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - properties: - datasetName: - description: datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated - type: string - datasetUUID: - description: datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: 'gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - properties: - fsType: - description: 'fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: string - partition: - description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - format: int32 - type: integer - pdName: - description: 'pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: string - readOnly: - description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: boolean - required: - - pdName - type: object - gitRepo: - description: 'gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' - properties: - directory: - description: directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. - type: string - repository: - description: repository is the URL - type: string - revision: - description: revision is the commit hash for the specified revision. - type: string - required: - - repository - type: object - glusterfs: - description: 'glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' - properties: - endpoints: - description: 'endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - path: - description: 'path is the Glusterfs volume path. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - readOnly: - description: 'readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: 'hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - properties: - path: - description: 'path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - type: - description: 'type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - required: - - path - type: object - iscsi: - description: 'iscsi represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' - properties: - chapAuthDiscovery: - description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication - type: boolean - chapAuthSession: - description: chapAuthSession defines whether support iSCSI Session CHAP authentication - type: boolean - fsType: - description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi' - type: string - initiatorName: - description: initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. - type: string - iqn: - description: iqn is the target iSCSI Qualified Name. - type: string - iscsiInterface: - description: iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). - type: string - lun: - description: lun represents iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). - items: - type: string - type: array - readOnly: - description: readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. - type: boolean - secretRef: - description: secretRef is the CHAP Secret for iSCSI target and initiator authentication - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - type: object - targetPortal: - description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: 'name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - nfs: - description: 'nfs represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - properties: - path: - description: 'path that is exported by the NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - readOnly: - description: 'readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: boolean - server: - description: 'server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: 'persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - claimName: - description: 'claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - type: string - readOnly: - description: readOnly Will force the ReadOnly setting in VolumeMounts. Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine - properties: - fsType: - description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - pdID: - description: pdID is the ID that identifies Photon Controller persistent disk - type: string - required: - - pdID - type: object - portworxVolume: - description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine - properties: - fsType: - description: fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". 
Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: volumeID uniquely identifies a Portworx volume - type: string - required: - - volumeID - type: object - projected: - description: projected items for all in one resources secrets, configmaps, and downward API - properties: - defaultMode: - description: defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: sources is the list of volume projections - items: - description: Projection that may be projected along with other supported volume types - properties: - configMap: - description: configMap information about the configMap data to project - properties: - items: - description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits used to set permissions on this file. 
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - optional: - description: optional specify whether the ConfigMap or its keys must be defined - type: boolean - type: object - downwardAPI: - description: downwardAPI information about the downwardAPI data to project - properties: - items: - description: Items is a list of DownwardAPIVolume file - items: - description: DownwardAPIVolumeFile represents information to create the file containing the pod field - properties: - fieldRef: - description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, optional for env vars' - type: string - divisor: - description: Specifies the output format of the exposed resources, defaults to "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - description: secret information about the secret data to project - properties: - items: - description: items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - optional: - description: optional field specify whether the Secret or its key must be defined - type: boolean - type: object - serviceAccountToken: - description: serviceAccountToken is information about the serviceAccountToken data to project - properties: - audience: - description: audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. - type: string - expirationSeconds: - description: expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. - format: int64 - type: integer - path: - description: path is the path relative to the mount point of the file to project the token into. 
- type: string - required: - - path - type: object - type: object - type: array - type: object - quobyte: - description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime - properties: - group: - description: group to map volume access to Default is no group - type: string - readOnly: - description: readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. - type: boolean - registry: - description: registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes - type: string - tenant: - description: tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin - type: string - user: - description: user to map volume access to Defaults to serivceaccount user - type: string - volume: - description: volume is a string that references an already created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: 'rbd represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' - properties: - fsType: - description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd' - type: string - image: - description: 'image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - keyring: - description: 'keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - monitors: - description: 'monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - items: - type: string - type: array - pool: - description: 'pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - readOnly: - description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: boolean - secretRef: - description: 'secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - type: object - user: - description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - required: - - image - - monitors - type: object - scaleIO: - description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - properties: - fsType: - description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". - type: string - gateway: - description: gateway is the host address of the ScaleIO API Gateway. - type: string - protectionDomain: - description: protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. - type: string - readOnly: - description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
- type: boolean - secretRef: - description: secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - type: object - sslEnabled: - description: sslEnabled Flag enable/disable SSL communication with Gateway, default false - type: boolean - storageMode: - description: storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. - type: string - storagePool: - description: storagePool is the ScaleIO Storage Pool associated with the protection domain. - type: string - system: - description: system is the name of the storage system as configured in ScaleIO. - type: string - volumeName: - description: volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: 'secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - properties: - defaultMode: - description: 'defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
- format: int32 - type: integer - items: - description: items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' - format: int32 - type: integer - path: - description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - optional: - description: optional field specify whether the Secret or its keys must be defined - type: boolean - secretName: - description: 'secretName is the name of the secret in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - type: string - type: object - storageos: - description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. - properties: - fsType: - description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - type: object - volumeName: - description: volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. - type: string - volumeNamespace: - description: volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - properties: - fsType: - description: fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. - type: string - storagePolicyName: - description: storagePolicyName is the storage Policy Based Management (SPBM) profile name. 
- type: string - volumePath: - description: volumePath is the path that identifies vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - volumeMount: - description: Volume mount specification. - properties: - mountPath: - description: Path within the container at which the volume should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - required: - - volume - - volumeMount - type: object - nullable: true - type: array - required: - - sink - - source - type: object - status: - description: Most recent status of the indexer. - nullable: true - properties: - conditions: - description: Conditions of the indexer. - items: - description: Condition contains details for one aspect of the current state of this API Resource. - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - nullable: true - type: array - instanceName: - description: The name of the container running the indexer. - nullable: true - type: string - phase: - description: Current phase of the indexer. - nullable: true - type: string - podCreated: - description: Creation timestamp of the indexer's pod. - format: date-time - nullable: true - type: string - restartCount: - description: Number of times the indexer container has restarted. - format: int32 - nullable: true - type: integer - statusServiceName: - description: Service name exposing the indexer's status. 
- nullable: true - type: string - type: object - required: - - spec - title: Indexer - type: object - served: true - storage: true - subresources: - status: {} - diff --git a/charts/operator/templates/NOTES.txt b/charts/operator/templates/NOTES.txt deleted file mode 100644 index 928d9b12..00000000 --- a/charts/operator/templates/NOTES.txt +++ /dev/null @@ -1,3 +0,0 @@ -{{ template "operator.fullname" . }} has been installed. Check its status by running: - - kubectl --namespace {{ template "operator.namespace" . }} get pods diff --git a/charts/operator/templates/_helpers.tpl b/charts/operator/templates/_helpers.tpl deleted file mode 100644 index ecfe3da3..00000000 --- a/charts/operator/templates/_helpers.tpl +++ /dev/null @@ -1,69 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "operator.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Namespace where to install the operator. -*/}} -{{- define "operator.namespace" -}} -{{- default .Release.Namespace .Values.namespace -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "operator.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. 
-*/}} -{{- define "operator.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "operator.labels" -}} -helm.sh/chart: {{ include "operator.chart" . }} -{{ include "operator.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "operator.selectorLabels" -}} -app.kubernetes.io/name: {{ include "operator.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "operator.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "operator.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/charts/operator/templates/deployment.yaml b/charts/operator/templates/deployment.yaml deleted file mode 100644 index 04a8b097..00000000 --- a/charts/operator/templates/deployment.yaml +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "operator.fullname" . }} - namespace: {{ include "operator.namespace" . }} - labels: - {{- include "operator.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "operator.selectorLabels" . | nindent 6 }} - strategy: - type: Recreate - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "operator.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - automountServiceAccountToken: true - serviceAccountName: {{ include "operator.serviceAccountName" . 
}} - containers: - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if gt (len .Values.customArgs) 0 }} - args: - {{- toYaml .Values.customArgs | nindent 12 }} - {{- end }} - env: - - name: "RUST_LOG" - value: "INFO" - {{- if .Values.extraEnv }} - {{- toYaml .Values.extraEnv | nindent 12 }} - {{- end }} - resources: - {{- toYaml .Values.resources | nindent 12 }} diff --git a/charts/operator/templates/rbac-resources.yaml b/charts/operator/templates/rbac-resources.yaml deleted file mode 100644 index 49838d8b..00000000 --- a/charts/operator/templates/rbac-resources.yaml +++ /dev/null @@ -1,59 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "operator.fullname" . }}-role -rules: -- apiGroups: - - "" - resources: - - events - verbs: - - "*" -- apiGroups: - - "" - resources: - - endpoints - - configmaps - - secrets - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - - pods/status - - pods/finalizers - verbs: - - "*" -- apiGroups: - - "" - resources: - - services - - services/status - - services/finalizers - verbs: - - "*" -- apiGroups: - - "apibara.com" - resources: - - indexers - - indexers/status - - indexers/finalizers - verbs: - - "*" ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "operator.fullname" . }}-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "operator.fullname" . }}-role -subjects: -- kind: ServiceAccount - name: {{ template "operator.serviceAccountName" . }} - namespace: {{ template "operator.namespace" . 
}} diff --git a/charts/operator/templates/service-account.yaml b/charts/operator/templates/service-account.yaml deleted file mode 100644 index 85e2a843..00000000 --- a/charts/operator/templates/service-account.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "operator.serviceAccountName" . }} - namespace: {{ include "operator.namespace" . }} - labels: - {{- include "operator.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/charts/operator/templates/tests/indexers.yaml b/charts/operator/templates/tests/indexers.yaml deleted file mode 100644 index faf57920..00000000 --- a/charts/operator/templates/tests/indexers.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if .Values.test.enabled }} ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "operator.fullname" . }}-test-api-key - namespace: {{ include "operator.namespace" . }} -stringData: - production: {{ .Values.test.apiKey }} ---- -apiVersion: apibara.com/v1alpha2 -kind: Indexer -metadata: - name: {{ include "operator.fullname" . }}-test-indexer - namespace: {{ include "operator.namespace" . }} -spec: - source: - gitHub: - repo: dna - owner: apibara - revision: main - subpath: examples/console - sink: - script: starknet_to_console.js - type: console - env: - - name: AUTH_TOKEN - valueFrom: - secretKeyRef: - name: {{ include "operator.fullname" . }}-test-api-key - key: production -{{- end }} diff --git a/charts/operator/values.yaml b/charts/operator/values.yaml deleted file mode 100644 index e6f0991a..00000000 --- a/charts/operator/values.yaml +++ /dev/null @@ -1,50 +0,0 @@ -image: - repository: quay.io/apibara/operator - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "" - -# namespace: apibara-system - -replicaCount: 1 - -# Custom args for the operator, e.g. 
`["--namespace", "my-namespace"]` -customArgs: - - start - -# Additional environment variables to set -extraEnv: [] - -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "apibara-operator-manager" - -podAnnotations: {} - -resources: - {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -# Values used for testing -test: - enabled: false - # DNA auth token - apiKey: "" diff --git a/cli/CHANGELOG.md b/cli/CHANGELOG.md deleted file mode 100644 index f9dc7f05..00000000 --- a/cli/CHANGELOG.md +++ /dev/null @@ -1,119 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Common Changelog](https://common-changelog.org/), and -this project adheres to -[Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [0.5.1] - 2024-08-02 - -_Add support for filtering invoke transactions v3._ - -### Added - -- Add support for filtering invoke transactions v3. - -## [0.5.0] - 2024-05-23 - -_Improve `apibara test` command when working with floating point numbers._ - -### Added - -- Add a `testOptions` section to `apibara test` snapshots. -- Add the `testOptions.floatingPointDecimals` option to control up to how many - decimal places to compare floating point numbers in snapshots. 
- -## [0.4.2] - 2024-01-19 - -_Allow network access._ - -### Added - -- Enable the `--allow-net` flag to allow the indexer script to access network resources. - -## [0.4.1] - 2023-12-06 - -_Improve compatibility with new sinks._ - -### Changed - -- Forward all environment-related options to the sink. - -## [0.4.0] - 2023-11-10 - -_Update Starknet filter._ - -### Changed - -- Update the Starknet filter definition to support the new - `includeTransaction` and `includeReceipt` options. - -## [0.3.3] - 2023-10-25 - -_Minor quality of life improvements._ - -### Fixed - -- Don't attempt to install plugins (like sinks) from pre-release releases. -- Show the correct plugin installation command when trying to run an indexer - that requires a missing sink. - -## [0.3.2] - 2023-10-24 - -_Error message improvements._ - -### Changed - -- This version changes how errors are handled to improve error messages. - Errors now show more context and additional information that will help - developers debug their indexers. - -## [0.3.1] - 2023-10-17 - -_Minor bug fixes in the `apibara test` command._ - -### Fixed - -- Avoid storing sensitive information such as authentication tokens in the - test snapshots. - -## [0.3.0] - 2023-09-26 - -_Add the `apibara test` command._ - -### Added - -- Introduce a `test` command to test indexers. This command implements - snapshot testing for indexers. The first time you run it, it downloads data - from a live DNA stream and records the output of the script. After the first - run, it replays the saved stream and compares the output from the script with - the output in the snapshot. A test is successful if the outputs match. - -### Changed - -- The `plugins` command is now also available as `plugin`. - -## [0.2.0] - 2023-09-16 - -_Introduce sink status gRPC service._ - -### Changed - -- The status server is now a gRPC service. 
This service returns the sink - indexing status, the starting block, and the chain's current head block - from the upstream DNA service. -- The status server now binds on a random port. This means it's easier to run - multiple sinks at the same time. - -## [0.1.0] - 2023-08-08 - -_First tagged release 🎉_ - -[0.4.0]: https://github.com/apibara/dna/releases/tag/cli/v0.4.0 -[0.3.3]: https://github.com/apibara/dna/releases/tag/cli/v0.3.3 -[0.3.2]: https://github.com/apibara/dna/releases/tag/cli/v0.3.2 -[0.3.1]: https://github.com/apibara/dna/releases/tag/cli/v0.3.1 -[0.3.0]: https://github.com/apibara/dna/releases/tag/cli/v0.3.0 -[0.2.0]: https://github.com/apibara/dna/releases/tag/cli/v0.2.0 -[0.1.0]: https://github.com/apibara/dna/releases/tag/cli/v0.1.0 diff --git a/cli/Cargo.toml b/cli/Cargo.toml deleted file mode 100644 index 4a46dca8..00000000 --- a/cli/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -[package] -name = "apibara-cli" -version = "0.5.1" -edition.workspace = true -license.workspace = true - -[[bin]] -name = "apibara" -path = "src/main.rs" -doc = false - -[dependencies] -apibara-observability = { path = "../observability" } -apibara-sink-common = { path = "../sinks/sink-common" } -apibara-sdk = { path = "../sdk" } -apibara-core = { path = "../core" } -apibara-script = { path = "../script" } -async-compression.workspace = true -clap.workspace = true -colored = "2.0.4" -dirs.workspace = true -error-stack.workspace = true -futures.workspace = true -octocrab = "0.29.1" -reqwest.workspace = true -serde.workspace = true -serde_json.workspace = true -tabled = "0.14.0" -tar = "0.4.40" -tokio.workspace = true -tokio-util.workspace = true -tokio-stream.workspace = true -similar-asserts = { version = "1.4.2", features = [ - "serde", -], git = "https://github.com/bigherc18/similar-asserts.git" } -walkdir = "2.3.3" -tracing.workspace = true -tempfile.workspace = true -float-cmp = "0.9.0" diff --git a/cli/src/error.rs b/cli/src/error.rs deleted file mode 100644 index 
3852fc82..00000000 --- a/cli/src/error.rs +++ /dev/null @@ -1,11 +0,0 @@ -use std::fmt; - -#[derive(Debug)] -pub struct CliError; -impl error_stack::Context for CliError {} - -impl fmt::Display for CliError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("cli operation failed") - } -} diff --git a/cli/src/main.rs b/cli/src/main.rs deleted file mode 100644 index d9f47ee0..00000000 --- a/cli/src/main.rs +++ /dev/null @@ -1,45 +0,0 @@ -mod error; -mod paths; -mod plugins; -mod run; -mod test; - -use apibara_observability::init_opentelemetry; -use apibara_sink_common::apibara_cli_style; -use clap::{Parser, Subcommand}; -use error::CliError; -use error_stack::{Result, ResultExt}; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None, styles = apibara_cli_style())] -struct Cli { - #[command(subcommand)] - subcommand: Command, -} - -#[derive(Subcommand, Debug)] -enum Command { - /// Run an indexer script. - Run(run::RunArgs), - /// Manage plugins. - /// - /// Plugins are used to extend Apibara functionality, for example by adding new data sinks. - #[clap(alias = "plugin")] - Plugins(plugins::PluginsArgs), - /// Test an indexer script. - Test(test::TestArgs), -} - -#[tokio::main] -async fn main() -> Result<(), CliError> { - init_opentelemetry() - .change_context(CliError) - .attach_printable("failed to initialize opentelemetry")?; - - let args = Cli::parse(); - match args.subcommand { - Command::Run(args) => run::run(args).await, - Command::Plugins(args) => plugins::run(args).await, - Command::Test(args) => test::run(args).await, - } -} diff --git a/cli/src/paths.rs b/cli/src/paths.rs deleted file mode 100644 index 018cfc81..00000000 --- a/cli/src/paths.rs +++ /dev/null @@ -1,16 +0,0 @@ -use std::{env, path::PathBuf}; - -/// Returns the local plugins directory. -pub fn plugins_dir() -> PathBuf { - apibara_dir().join("plugins") -} - -/// Returns the local apibara directory. 
-pub fn apibara_dir() -> PathBuf { - match env::var("APIBARA_HOME") { - Err(_) => dirs::data_local_dir() - .expect("local data directory") - .join("apibara"), - Ok(path) => PathBuf::from(path), - } -} diff --git a/cli/src/plugins.rs b/cli/src/plugins.rs deleted file mode 100644 index 33f2f030..00000000 --- a/cli/src/plugins.rs +++ /dev/null @@ -1,331 +0,0 @@ -use std::{ - env, fs, - io::ErrorKind, - os::unix::prelude::PermissionsExt, - path::{Path, PathBuf}, - process, -}; - -use async_compression::tokio::bufread::GzipDecoder; -use clap::{Args, Subcommand}; -use colored::*; -use error_stack::{Result, ResultExt}; -use futures::stream::TryStreamExt; -use reqwest::Url; -use tabled::{settings::Style, Table, Tabled}; -use tokio_util::io::StreamReader; - -use crate::{error::CliError, paths::plugins_dir}; - -static GITHUB_REPO_ORG: &str = "apibara"; -static GITHUB_REPO_NAME: &str = "dna"; - -#[derive(Debug, Args)] -pub struct PluginsArgs { - #[command(subcommand)] - subcommand: Command, -} - -#[derive(Debug, Subcommand)] -pub enum Command { - /// Install a new plugin. - Install(InstallArgs), - /// List all installed plugins. - List(ListArgs), - /// Remove an installed plugin. - Remove(RemoveArgs), -} - -#[derive(Debug, Tabled)] -#[tabled(rename_all = "SCREAMING_SNAKE_CASE")] -struct PluginInfo { - name: String, - kind: String, - version: String, -} - -#[derive(Debug, Args)] -pub struct InstallArgs { - /// The name of the plugin to install, e.g. `sink-postgres`. - name: Option, - /// Install the plugin from the given file. - #[arg(long, short = 'f')] - file: Option, -} - -#[derive(Debug, Args)] -pub struct ListArgs {} - -#[derive(Debug, Args)] -pub struct RemoveArgs { - /// The type of plugin to remove, e.g. `sink`. - kind: String, - /// The name of the plugin to remove, e.g. `mongo`. 
- name: String, -} - -pub async fn run(args: PluginsArgs) -> Result<(), CliError> { - match args.subcommand { - Command::Install(args) => run_install(args).await, - Command::List(args) => run_list(args), - Command::Remove(args) => run_remove(args), - } -} - -async fn run_install(args: InstallArgs) -> Result<(), CliError> { - let dir = plugins_dir(); - fs::create_dir_all(dir) - .change_context(CliError) - .attach_printable("failed to created plugins directory")?; - let cwd = env::current_dir().change_context(CliError)?; - - if let Some(file) = args.file { - install_from_file(cwd.join(file))?; - } else if let Some(name) = args.name { - install_from_github(name).await?; - } - - Ok(()) -} - -async fn install_from_github(name: String) -> Result<(), CliError> { - let (kind, name) = name - .split_once('-') - .ok_or(CliError) - .attach_printable("Plugin name must be in the format -")?; - - let releases = octocrab::instance() - .repos(GITHUB_REPO_ORG, GITHUB_REPO_NAME) - .releases() - .list() - .per_page(50) - .send() - .await - .change_context(CliError) - .attach_printable("failed to fetch GitHub releases")?; - - let tag_prefix = format!("{}-{}/", kind, name); - let mut plugin_release = None; - let all_releases = octocrab::instance() - .all_pages(releases) - .await - .change_context(CliError) - .attach_printable("failed to fetch GitHub releases")?; - for release in all_releases { - if !release.prerelease && release.tag_name.starts_with(&tag_prefix) { - plugin_release = Some(release); - break; - } - } - - let plugin_release = plugin_release.ok_or(CliError).attach_printable_lazy(|| { - format!( - "No release found for plugin {}-{}. 
Did you spell it correctly?", - kind, name - ) - })?; - - println!( - "Found release {}", - plugin_release - .name - .unwrap_or(plugin_release.tag_name) - .green() - ); - - let info = PluginInfo::from_kind_name(kind.to_string(), name.to_string()); - - let artifact_name = info.artifact_name(env::consts::OS, env::consts::ARCH); - let asset = plugin_release - .assets - .iter() - .find(|asset| asset.name == artifact_name) - .ok_or(CliError) - .attach_printable_lazy(|| { - format!( - "No asset found for plugin {}-{} for your platform. OS={}, ARCH={}", - kind, - name, - env::consts::OS, - env::consts::ARCH - ) - })?; - - println!("Downloading {}...", asset.name.blue()); - - let target = plugins_dir().join(info.binary_name()); - download_artifact_to_path(asset.browser_download_url.clone(), &target).await?; - - println!("Plugin {} installed to {}", info.name, target.display()); - - Ok(()) -} - -fn install_from_file(file: impl AsRef) -> Result<(), CliError> { - let (name, version) = get_binary_name_version(&file)?; - - println!("Installing {} v{}", name, version); - - let target = plugins_dir().join(name); - // Copy the binary content to a new file to avoid copying the permissions. 
- let content = fs::read(&file) - .change_context(CliError) - .attach_printable_lazy(|| format!("failed to read content of file {:?}", file.as_ref()))?; - fs::write(&target, content) - .change_context(CliError) - .attach_printable_lazy(|| format!("failed to write plugin to file {:?}", &target))?; - fs::set_permissions(&target, fs::Permissions::from_mode(0o755)) - .change_context(CliError) - .attach_printable_lazy(|| { - format!("failed to update plugin permissions at {:?}", &target) - })?; - - println!("Plugin installed to {}", target.display()); - - Ok(()) -} - -fn run_list(_args: ListArgs) -> Result<(), CliError> { - let dir = plugins_dir(); - let plugins = get_plugins(dir)?; - - let table = Table::new(plugins).with(Style::rounded()).to_string(); - println!("{}", table); - - Ok(()) -} - -fn run_remove(args: RemoveArgs) -> Result<(), CliError> { - let dir = plugins_dir(); - let plugin = PluginInfo::from_kind_name(args.kind, args.name); - let plugin_path = dir.join(plugin.binary_name()); - - let (name, version) = get_binary_name_version(&plugin_path)?; - - println!("Removing {} v{}", name, version); - fs::remove_file(plugin_path.clone()) - .change_context(CliError) - .attach_printable_lazy(|| format!("failed to remove plugin at {:?}", plugin_path))?; - - Ok(()) -} - -fn get_plugins(dir: impl AsRef) -> Result, CliError> { - if !dir.as_ref().is_dir() { - return Ok(vec![]); - } - - let mut plugins = Vec::default(); - for file in fs::read_dir(dir).change_context(CliError)? 
{ - let file = file.change_context(CliError)?; - - let metadata = file.metadata().change_context(CliError)?; - if !metadata.is_file() || !metadata.permissions().mode() & 0o111 != 0 { - eprintln!( - "{} {:?}", - "Plugins directory contains non-executable file".yellow(), - file.path() - ); - continue; - } - - let (name, version) = get_binary_name_version(file.path()).attach_printable_lazy(|| { - format!( - "Failed to get plugin version: {}", - file.file_name().to_string_lossy() - ) - })?; - let info = PluginInfo::from_name_version(name, version)?; - plugins.push(info); - } - - Ok(plugins) -} - -/// Runs the given plugin binary to extract the name and version. -fn get_binary_name_version(file: impl AsRef) -> Result<(String, String), CliError> { - let output = process::Command::new(file.as_ref()) - .arg("--version") - .output() - .change_context(CliError)?; - - let output = String::from_utf8(output.stdout).change_context(CliError)?; - let (name, version) = output - .trim() - .split_once(' ') - .ok_or(CliError) - .attach_printable("Plugin --version output does not match spec")?; - Ok((name.to_string(), version.to_string())) -} - -impl PluginInfo { - pub fn from_kind_name(kind: String, name: String) -> Self { - Self { - name, - kind, - version: String::default(), - } - } - - pub fn from_name_version(name: String, version: String) -> Result { - let mut parts = name.splitn(3, '-'); - let _ = parts - .next() - .ok_or(CliError) - .attach_printable("Plugin name is empty")?; - let kind = parts - .next() - .ok_or(CliError) - .attach_printable("Plugin name does not contain a kind")? - .to_string(); - let name = parts - .next() - .ok_or(CliError) - .attach_printable("Plugin name does not contain a kind")? 
- .to_string(); - - Ok(Self { - name, - version, - kind, - }) - } - - pub fn binary_name(&self) -> String { - format!("apibara-{}-{}", self.kind, self.name) - } - - pub fn artifact_name(&self, os: &str, arch: &str) -> String { - format!("{}-{}-{}-{}.gz", self.kind, self.name, arch, os) - } -} - -async fn download_artifact_to_path(url: Url, dest: impl AsRef) -> Result<(), CliError> { - let response = reqwest::get(url.clone()) - .await - .change_context(CliError) - .attach_printable_lazy(|| format!("failed to GET {url}"))?; - let stream = response - .bytes_stream() - .map_err(|err| std::io::Error::new(ErrorKind::Other, err)); - - let stream_reader = StreamReader::new(stream); - let mut decompressed = GzipDecoder::new(stream_reader); - - let mut file = tokio::fs::File::create(&dest) - .await - .change_context(CliError) - .attach_printable_lazy(|| format!("failed to create file {:?}", dest.as_ref()))?; - - tokio::io::copy(&mut decompressed, &mut file) - .await - .change_context(CliError) - .attach_printable("failed to copy artifact content")?; - - file.set_permissions(fs::Permissions::from_mode(0o755)) - .await - .change_context(CliError) - .attach_printable("failed to set permissions on artifact file")?; - - Ok(()) -} diff --git a/cli/src/run.rs b/cli/src/run.rs deleted file mode 100644 index b3f4762f..00000000 --- a/cli/src/run.rs +++ /dev/null @@ -1,125 +0,0 @@ -use std::{io::ErrorKind, process}; - -use apibara_sink_common::{load_script, FullOptionsFromScript, ScriptOptions}; -use clap::Args; -use colored::*; -use error_stack::{Result, ResultExt}; -use serde::Deserialize; - -use crate::{error::CliError, paths::plugins_dir}; - -#[derive(Args, Debug)] -#[clap(trailing_var_arg = true, allow_hyphen_values = true)] -pub struct RunArgs { - /// The path to the indexer script. - script: String, - #[clap(flatten)] - transform: ScriptOptions, - /// Arguments forwarded to the indexer. 
- args: Vec, -} - -#[derive(Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -struct DummyOptions { - pub sink_type: String, -} - -pub async fn run(args: RunArgs) -> Result<(), CliError> { - // While not recommended, the script may return a different sink based on some env variable. We - // need to load the environment variables before loading the script. - let script_options = args - .transform - .load_environment_variables() - .change_context(CliError) - .attach_printable("failed to parse script options")? - .into_indexer_options(); - - let mut script = load_script(&args.script, script_options).change_context(CliError)?; - - // Load the configuration from the script, but we don't need the full options yet. - let configuration = script - .configuration::>() - .await - .change_context(CliError)?; - - // Delegate running the indexer to the sink command. - let sink_type = configuration.sink.sink_type; - let sink_command = get_sink_command(&sink_type); - - // Add back the script/transform arguments if specified. - // TODO: There must be a better way to do this. 
- let mut extra_args = args.args; - if let Some(allow_env) = args.transform.allow_env { - extra_args.push("--allow-env".to_string()); - extra_args.push(allow_env.to_string_lossy().to_string()); - }; - if let Some(allow_env_from_env) = args.transform.allow_env_from_env { - extra_args.push("--allow-env-from-env".to_string()); - extra_args.push(allow_env_from_env.join(",").to_string()); - } - if let Some(allow_net) = args.transform.allow_net { - extra_args.push("--allow-net".to_string()); - extra_args.push(allow_net.join(",").to_string()); - }; - if let Some(allow_read) = args.transform.allow_read { - extra_args.push("--allow-read".to_string()); - extra_args.push(allow_read.join(",").to_string()); - }; - if let Some(allow_write) = args.transform.allow_write { - extra_args.push("--allow-write".to_string()); - extra_args.push(allow_write.join(",").to_string()); - }; - if let Some(transform_timeout) = args.transform.script_transform_timeout_seconds { - extra_args.push("--script-transform-timeout-seconds".to_string()); - extra_args.push(transform_timeout.to_string()); - } - if let Some(load_timeout) = args.transform.script_load_timeout_seconds { - extra_args.push("--script-load-timeout-seconds".to_string()); - extra_args.push(load_timeout.to_string()); - } - - let command_res = process::Command::new(sink_command) - .arg("run") - .arg(args.script) - .args(extra_args) - .spawn(); - - match command_res { - Ok(mut child) => { - child.wait().change_context(CliError)?; - Ok(()) - } - Err(err) => { - if let ErrorKind::NotFound = err.kind() { - eprintln!( - "{} {} {}", - "Sink".red(), - sink_type, - "is not installed".red() - ); - eprintln!( - "Install it with {} or by adding it to your $PATH", - format!("`apibara plugins install sink-{}`", sink_type).green() - ); - std::process::exit(1); - } - Err(err) - .change_context(CliError) - .attach_printable("error while running sink") - } - } -} - -fn get_sink_command(sink_type: &str) -> String { - let dir = plugins_dir(); - let binary 
= format!("apibara-sink-{}", sink_type); - - // If the user hasn't installed the plugin, try to invoke from path. - let installed = dir.join(&binary); - if installed.exists() { - return installed.to_string_lossy().to_string(); - } else { - binary - } -} diff --git a/cli/src/test/compare.rs b/cli/src/test/compare.rs deleted file mode 100644 index 9dffbcef..00000000 --- a/cli/src/test/compare.rs +++ /dev/null @@ -1,67 +0,0 @@ -use float_cmp::approx_eq; -use serde_json::Value; - -use super::snapshot::TestOptions; - -const DEFAULT_FP_DECIMALS: i64 = 20; - -/// Compare two outputs, taking into account test options like floating point precision. -pub fn outputs_are_equal(expected: &Value, actual: &Value, options: &TestOptions) -> bool { - let visitor = EqualityVisitor::from_options(options); - visitor.eq(expected, actual) -} - -struct EqualityVisitor { - floating_point_decimals: Option, -} - -impl EqualityVisitor { - pub fn from_options(options: &TestOptions) -> Self { - Self { - floating_point_decimals: options.floating_point_decimals, - } - } - - pub fn eq(&self, expected: &Value, actual: &Value) -> bool { - match (expected, actual) { - (Value::Number(expected), Value::Number(actual)) => self.number_eq(expected, actual), - (Value::Array(expected), Value::Array(actual)) => self.array_eq(expected, actual), - (Value::Object(expected), Value::Object(actual)) => self.object_eq(expected, actual), - (expected, actual) => expected == actual, - } - } - - fn number_eq(&self, expected: &serde_json::Number, actual: &serde_json::Number) -> bool { - let decimals = self.floating_point_decimals.unwrap_or(DEFAULT_FP_DECIMALS); - - match (expected.as_f64(), actual.as_f64()) { - (Some(expected), Some(actual)) => { - let epsilon = 10.0_f64.powi(-decimals as i32); - approx_eq!(f64, expected, actual, epsilon = epsilon) - } - _ => expected == actual, - } - } - - fn array_eq(&self, expected: &[Value], actual: &[Value]) -> bool { - expected.len() == actual.len() - && expected - .iter() - 
.zip(actual.iter()) - .all(|(expected, actual)| self.eq(expected, actual)) - } - - fn object_eq( - &self, - expected: &serde_json::Map, - actual: &serde_json::Map, - ) -> bool { - expected.len() == actual.len() - && expected - .iter() - .all(|(key, expected)| match actual.get(key) { - Some(actual) => self.eq(expected, actual), - None => false, - }) - } -} diff --git a/cli/src/test/error.rs b/cli/src/test/error.rs deleted file mode 100644 index e9d83205..00000000 --- a/cli/src/test/error.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::io::Write; - -use colored::Colorize; -use serde_json::Value; -use similar_asserts::serde_impl::Debug as SimilarAssertsDebug; -use similar_asserts::SimpleDiff; -use tracing::warn; - -const TRUNCATE_THRESHOLD: usize = 1000; -const DIFF_TRUNCATE_THRESHOLD: usize = 2000; - -fn truncate(s: &str, threshold: usize, write_tempfile: bool) -> String { - if s.chars().count() <= threshold { - return s.to_owned(); - } - - let mut full_output_hint = "".to_owned(); - - if write_tempfile { - match tempfile::Builder::new() - .prefix("apibara-test-diff-") - .suffix(".patch") - .tempfile() - .map_err(|err| err.to_string()) - .and_then(|keep| keep.keep().map_err(|err| err.to_string())) - .and_then(|(mut file, path)| match file.write_all(s.as_bytes()) { - Ok(_) => Ok(path), - Err(err) => Err(err.to_string()), - }) { - Ok(path) => full_output_hint = format!(", full output written to `{}`", path.display()), - Err(err) => { - warn!(err =? err, "Cannot create tempfile to save long diff"); - } - } - } - - let start = &s[..threshold / 2]; - let end = &s[s.len() - threshold / 2..]; - - let truncated_prefix = format!("\n", full_output_hint).yellow(); - - let truncated_msg = format!( - "\n ... \n {} chars truncated{}\n ... 
\n", - s.chars().count() - threshold, - full_output_hint, - ) - .yellow(); - - format!("{truncated_prefix} {start} {truncated_msg} {end}") -} - -pub fn get_assertion_error(expected_outputs: &[Value], found_outputs: &[Value]) -> String { - let left = format!("{:#?}", SimilarAssertsDebug(&expected_outputs)); - let right = format!("{:#?}", SimilarAssertsDebug(&found_outputs)); - - let left_label = "expected"; - let right_label = "found"; - let label_padding = left_label.chars().count().max(right_label.chars().count()); - - let diff = SimpleDiff::from_str(left.as_str(), right.as_str(), left_label, right_label); - - let assert_fail = format!( - "assertion failed: `({} == {})`'\n {:, - /// The number of blocks to stream. - #[arg(long, short = 'b')] - num_batches: Option, - /// Regenerate the snapshot even if it already exists. - #[arg(long, short = 'o', default_value_t = false)] - r#override: bool, - /// The name of the snapshot. - #[arg(long, short = 'n')] - name: Option, - /// Override the starting block from the script. - #[arg(long, short, env)] - starting_block: Option, - #[clap(flatten)] - stream_options: StreamOptions, - #[clap(flatten)] - dotenv_options: ScriptOptions, -} - -fn validate_args(args: &TestArgs) -> Result<(), CliError> { - if let Some(name) = &args.name { - let is_invalid = name.contains(|c| { - c == '/' - || c == '.' - || c == '\\' - || c == ':' - || c == '*' - || c == '?' 
- || c == '"' - || c == '<' - || c == '>' - || c == '|' - }); - if is_invalid { - return Err(CliError).attach_printable( - r#"Invalid name `{name}`, name should not contain /, ., \, :, *, ?, ", <, >, or | as it'll be used to construct the snapshot path"# - ); - } - } - - if let Some(num_batches) = args.num_batches { - if num_batches < 1 { - return Err(CliError).attach_printable_lazy(|| { - format!("Invalid number of blocks `{num_batches}`, it should be > 0") - }); - } - } - - if let Some(path) = &args.path { - // Think about using try_exists instead - if !path.exists() { - return Err(CliError).attach_printable_lazy(|| { - format!( - "Invalid path: `{}`, no such file or directory", - &path.display() - ) - }); - } - } - - Ok(()) -} - -pub fn warn_ignored_args(args: &TestArgs) { - if args.starting_block.is_some() - || args.num_batches.is_some() - || args.r#override - || args.stream_options.stream_url.is_some() - || args.stream_options.max_message_size.is_some() - || args.stream_options.metadata.is_some() - { - warn!( - "The following arguments are ignored: --starting-block, --num-batches, \ - --override, --stream-url, --max-message-size, --metadata when running tests, \ - if you want to generate a snapshot with different options, \ - use the --override flag or give it a name with --name" - ) - } -} - -pub async fn run(args: TestArgs) -> Result<(), CliError> { - validate_args(&args)?; - - match &args.path { - Some(path) => { - // Think about using try_exists instead - if !path.exists() { - return Err(CliError).attach_printable_lazy(|| { - format!( - "Invalid path: `{}`, no such file or directory", - &path.display() - ) - }); - } - - if path.is_dir() { - warn_ignored_args(&args); - run::run_all_tests(&path, &args.dotenv_options, None).await?; - return Ok(()); - } - - let extension = Path::new(&path) - .extension() - .ok_or(CliError) - .attach_printable_lazy(|| format!("Invalid path: `{}`", path.display()))?; - - match extension.to_str().unwrap() { - "json" => { - 
warn_ignored_args(&args); - run::run_single_test(path, None, None, &args.dotenv_options).await?; - }, - "js" | "ts" => { - let snapshot_path = args.name.clone() - .map(|name| Path::new(SNAPSHOTS_DIR).join(name).with_extension("json")) - .unwrap_or(snapshot::get_snapshot_path(path)?); - - if args.r#override || !snapshot_path.exists() { - run::run_generate_snapshot( - path, - &snapshot_path, - args.starting_block, - args.num_batches, - &args.stream_options, - &args.dotenv_options, - ).await?; - } else { - warn_ignored_args(&args); - if args.name.is_some() { - run::run_single_test(&snapshot_path, None, Some(path), &args.dotenv_options).await?; - } else { - run::run_all_tests(SNAPSHOTS_DIR, &args.dotenv_options, Some(path)).await?; - } - } - } - _ => return Err(CliError).attach_printable_lazy(|| format!( - "Invalid file extension: `{}`, must be a .json for snapshots or .js / .ts for scripts", - path.display() - )), - } - } - None => { - warn_ignored_args(&args); - run::run_all_tests(SNAPSHOTS_DIR, &args.dotenv_options, None).await?; - } - } - - Ok(()) -} diff --git a/cli/src/test/run.rs b/cli/src/test/run.rs deleted file mode 100644 index f731b42c..00000000 --- a/cli/src/test/run.rs +++ /dev/null @@ -1,387 +0,0 @@ -use std::fs::File; -use std::io::{BufWriter, Write}; - -use std::{fs, path::Path}; - -use tracing::warn; -use walkdir::{DirEntry, WalkDir}; - -use apibara_sink_common::{ - load_script, OptionsFromScript, ScriptOptions, StreamConfigurationOptions, StreamOptions, -}; -use colored::*; -use error_stack::{Result, ResultExt}; -use similar_asserts::serde_impl::Debug as SimilarAssertsDebug; -use similar_asserts::SimpleDiff; - -use crate::error::CliError; -use crate::test::error::get_assertion_error; -use crate::test::snapshot::{Snapshot, SnapshotGenerator}; - -use super::compare::outputs_are_equal; - -const DEFAULT_NUM_BATCHES: usize = 1; - -fn to_relative_path(path: &Path) -> &Path { - let current_dir = std::env::current_dir().unwrap(); - if let Ok(stripped) = 
path.strip_prefix(¤t_dir) { - stripped - } else { - path - } -} - -#[derive(Debug)] -pub enum TestResult { - Passed, - Failed { message: String }, -} - -pub async fn run_single_test( - snapshot_path: &Path, - snapshot: Option, - script_path: Option<&Path>, - dotenv_options: &ScriptOptions, -) -> Result { - let snapshot_path_display = to_relative_path(snapshot_path).display(); - - println!( - "{} test `{}` ... ", - "Running".green().bold(), - snapshot_path_display - ); - - let snapshot = if let Some(snapshot) = snapshot { - snapshot - } else { - let file = fs::File::open(snapshot_path) - .change_context(CliError) - .attach_printable_lazy(|| { - format!("Cannot open snapshot file `{}`", snapshot_path_display) - })?; - - let snapshot: Snapshot = serde_json::from_reader(file) - .change_context(CliError) - .attach_printable_lazy(|| { - format!( - "Cannot decode json file as a Snapshot `{}`", - snapshot_path_display - ) - })?; - snapshot - }; - - run_test(snapshot, script_path, dotenv_options).await -} - -async fn run_test( - snapshot: Snapshot, - script_path: Option<&Path>, - script_options: &ScriptOptions, -) -> Result { - let hint = - "rerun with --override to regenerate the snapshot or change the snapshot name with --name"; - - if let Some(script_path) = script_path { - if snapshot.script_path != script_path { - let message = format!( - "Snapshot generated with a different script: `{}`, {}", - snapshot.script_path.display(), - hint - ); - return Ok(TestResult::Failed { message }); - } - } - - let script_path_str = snapshot.script_path.to_string_lossy().to_string(); - - let script_options = script_options - .load_environment_variables() - .change_context(CliError)? - .into_indexer_options(); - - let mut script = load_script(&script_path_str, script_options).change_context(CliError)?; - - let filter = &script - .configuration::() - .await - .change_context(CliError)? 
- .stream_configuration - .filter; - - let snapshot_filter = &snapshot.stream_configuration_options.filter; - - if snapshot_filter != filter { - let left = format!("{:#?}", SimilarAssertsDebug(&snapshot_filter)); - let right = format!("{:#?}", SimilarAssertsDebug(&filter)); - - let diff = SimpleDiff::from_str(left.as_str(), right.as_str(), "expected", "found"); - - let message = format!( - "Snapshot generated with a different filter, {}\n{}", - hint, &diff - ); - return Ok(TestResult::Failed { message }); - } - - let mut expected_outputs = vec![]; - let mut found_outputs = vec![]; - - let mut has_error = false; - for message in snapshot.stream { - let input = message["input"] - .as_array() - .ok_or(CliError) - .attach_printable("snapshot input should be an array")? - .clone(); - let expected_output = message["output"].clone(); - - let found_output = script - .transform(input) - .await - .change_context(CliError) - .attach_printable("failed to transform data")?; - - if !outputs_are_equal(&expected_output, &found_output, &snapshot.test_options) { - has_error = true; - } - - expected_outputs.push(expected_output); - found_outputs.push(found_output); - } - - if has_error { - let message = get_assertion_error(&expected_outputs, &found_outputs); - Ok(TestResult::Failed { message }) - } else { - Ok(TestResult::Passed) - } -} - -/// Merge stream_options and stream_configuration_options from CLI, script and -/// snapshot if it exists -/// Priority: CLI > snapshot > script except for filter which is exclusively configured from script -pub async fn merge_options( - starting_block: Option, - num_batches: Option, - cli_stream_options: &StreamOptions, - script_options: OptionsFromScript, - snapshot: Option, -) -> Result<(StreamOptions, StreamConfigurationOptions, usize), CliError> { - if let Some(snapshot) = snapshot { - let stream_options = cli_stream_options - .clone() - .merge(snapshot.stream_options) - .merge(script_options.stream); - - let mut stream_configuration_options = 
snapshot - .stream_configuration_options - .merge(script_options.stream_configuration.clone()); - - stream_configuration_options.starting_block = - starting_block.or(stream_configuration_options.starting_block); - - stream_configuration_options.filter = script_options.stream_configuration.filter; - - let num_batches = num_batches.unwrap_or(snapshot.num_batches); - - Ok((stream_options, stream_configuration_options, num_batches)) - } else { - let stream_options = cli_stream_options.clone().merge(script_options.stream); - - let mut stream_configuration_options = script_options.stream_configuration; - - stream_configuration_options.starting_block = - starting_block.or(stream_configuration_options.starting_block); - - let num_batches = num_batches.unwrap_or(DEFAULT_NUM_BATCHES); - - Ok((stream_options, stream_configuration_options, num_batches)) - } -} - -pub async fn run_generate_snapshot( - script_path: &Path, - snapshot_path: &Path, - starting_block: Option, - num_batches: Option, - cli_stream_options: &StreamOptions, - script_options: &ScriptOptions, -) -> Result<(), CliError> { - println!( - "{} snapshot `{}` ...", - "Generating".green().bold(), - to_relative_path(snapshot_path).display() - ); - - let script_path_str = script_path.to_string_lossy().to_string(); - let script_options = script_options - .load_environment_variables() - .change_context(CliError)? - .into_indexer_options(); - - let mut script = load_script(&script_path_str, script_options).change_context(CliError)?; - - let script_options = script - .configuration::() - .await - .change_context(CliError)?; - - let snapshot = if snapshot_path.exists() { - match fs::File::open(snapshot_path) { - Ok(file) => serde_json::from_reader(file).ok(), - Err(err) => { - warn!(err =? 
err, "Cannot read snapshot file to get previously used options `{}`", snapshot_path.display()); - None - } - } - } else { - None - }; - - let (stream_options, stream_configuration_options, num_batches) = merge_options( - starting_block, - num_batches, - cli_stream_options, - script_options, - snapshot, - ) - .await?; - - let snapshot = SnapshotGenerator::new( - script_path.to_owned(), - script, - num_batches, - stream_options, - stream_configuration_options, - ) - .generate() - .await?; - - if !&snapshot_path.parent().unwrap().exists() { - fs::create_dir_all(snapshot_path.parent().unwrap()).change_context(CliError)?; - } - - let file = File::create(snapshot_path).change_context(CliError)?; - let mut writer = BufWriter::new(file); - serde_json::to_writer_pretty(&mut writer, &snapshot).change_context(CliError)?; - writer.flush().change_context(CliError)?; - - let start_block = snapshot.stream[0]["cursor"]["orderKey"] - .as_u64() - .unwrap_or(0); - let end_block = &snapshot.stream.last().unwrap()["end_cursor"]["orderKey"] - .as_u64() - .unwrap(); - - let num_batches = snapshot.stream.len(); - let num_batches = if num_batches > 1 { - format!("{} batches ({} -> {})", num_batches, start_block, end_block) - } else { - format!("{} batch ({} -> {})", num_batches, start_block, end_block) - }; - - println!( - "{} snapshot successfully with {}", - "Generated".green().bold(), - num_batches.green().bold(), - ); - - Ok(()) -} - -pub async fn run_all_tests( - dir: impl AsRef, - dotenv_options: &ScriptOptions, - script_path: Option<&Path>, -) -> Result<(), CliError> { - let for_script = if let Some(script_path) = script_path { - format!(" for `{}`", to_relative_path(script_path).display()) - } else { - "".to_string() - }; - - println!( - "{} tests{} from `{}` ... 
", - "Collecting".green().bold(), - for_script, - to_relative_path(dir.as_ref()).display(), - ); - - let snapshots: Vec<(DirEntry, Option)> = WalkDir::new(&dir) - .into_iter() - .filter_map(|e| e.ok()) - .filter(|e| e.path().extension().map(|e| e == "json").unwrap_or(false)) - .filter_map(|e| { - if let Some(script_path) = script_path { - let file = fs::File::open(e.path()); - match file { - Ok(file) => { - let snapshot: std::result::Result = - serde_json::from_reader(file); - - match snapshot { - Ok(snapshot) => { - if snapshot.script_path == script_path { - Some((e, Some(snapshot))) - } else { - None - } - } - Err(err) => { - warn!(err =? err, "Cannot decode json file as a Sanpshot `{}`", e.path().display()); - None - } - } - } - Err(err) => { - warn!(err =? err, "Cannot open snapshot file `{}`", e.path().display()); - None - } - } - } else { - Some((e, None)) - } - }) - .collect(); - - println!("{} {} files", "Collected".green().bold(), snapshots.len()); - - let mut num_passed_tests = 0; - let mut num_failed_tests = 0; - let mut num_error_tests = 0; - - for (snapshot_path, snapshot) in snapshots { - println!(); - match run_single_test(snapshot_path.path(), snapshot, None, dotenv_options).await { - Ok(TestResult::Passed) => { - println!("{}", "Test passed".green()); - num_passed_tests += 1; - } - Ok(TestResult::Failed { message }) => { - println!("{}\n", "Test failed".red()); - eprintln!("{}", message); - num_failed_tests += 1; - } - Err(err) => { - println!("{}\n", "Test error".red()); - eprintln!("{}", format!("{err:?}").bright_red()); - num_error_tests += 1 - } - }; - } - - let passed = format!("{} passed", num_passed_tests).green(); - let failed = format!("{} failed", num_failed_tests).red(); - let error = format!("{} error", num_error_tests).bright_red(); - - println!( - "\n{}: {}, {}, {}", - "Test result".bold(), - passed, - failed, - error - ); - - Ok(()) -} diff --git a/cli/src/test/snapshot.rs b/cli/src/test/snapshot.rs deleted file mode 100644 index 
ed7cae5a..00000000 --- a/cli/src/test/snapshot.rs +++ /dev/null @@ -1,195 +0,0 @@ -use std::path::{Path, PathBuf}; - -use apibara_core::starknet::v1alpha2::{Block, Filter}; -use apibara_script::Script; -use apibara_sdk::{configuration, ClientBuilder, DataMessage}; -use apibara_sink_common::{StreamConfigurationOptions, StreamOptions}; -use error_stack::{Result, ResultExt}; -use serde::{Deserialize, Serialize}; -use serde_json::{json, Value}; -use tokio_stream::StreamExt; -use tracing::debug; - -use crate::{error::CliError, test::SNAPSHOTS_DIR}; - -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -pub struct Snapshot { - pub script_path: PathBuf, - pub num_batches: usize, - pub stream_options: StreamOptions, - pub stream_configuration_options: StreamConfigurationOptions, - pub test_options: TestOptions, - pub stream: Vec, -} - -#[derive(Serialize, Deserialize, Debug, Default)] -#[serde(rename_all = "camelCase")] -pub struct TestOptions { - #[serde(skip_serializing_if = "Option::is_none")] - pub floating_point_decimals: Option, -} - -pub struct SnapshotGenerator { - script_path: PathBuf, - script: Script, - num_batches: usize, - stream_options: StreamOptions, - stream_configuration_options: StreamConfigurationOptions, -} - -pub fn get_snapshot_path(script_path: &Path) -> Result { - let file_stem = script_path - .file_stem() - .ok_or(CliError) - .attach_printable_lazy(|| format!("Invalid path `{}`", script_path.display()))?; - - let current_dir = std::env::current_dir() - .change_context(CliError) - .attach_printable("failed to get current directory")?; - - let snapshot_path = current_dir - .join(SNAPSHOTS_DIR) - .join(format!("{}.json", file_stem.to_string_lossy())); - - Ok(snapshot_path) -} - -impl SnapshotGenerator { - pub fn new( - script_path: PathBuf, - script: Script, - num_batches: usize, - stream_options: StreamOptions, - stream_configuration_options: StreamConfigurationOptions, - ) -> Self { - Self { - script_path, - script, - 
num_batches, - stream_options, - stream_configuration_options, - } - } - - pub async fn generate(mut self) -> Result { - let configuration = self.stream_configuration_options.as_starknet().unwrap(); - let stream_configuration = self - .stream_options - .clone() - .to_stream_configuration() - .change_context(CliError) - .attach_printable("failed to convert stream options")?; - - let (configuration_client, configuration_stream) = configuration::channel(128); - - configuration_client - .send(configuration) - .await - .change_context(CliError) - .attach_printable("failed to send configuration")?; - - let stream_client = ClientBuilder::default() - .with_max_message_size(stream_configuration.max_message_size_bytes.as_u64() as usize) - .with_metadata(stream_configuration.metadata.clone()) - .with_bearer_token(stream_configuration.bearer_token.clone()) - .connect(stream_configuration.stream_url.clone()) - .await - .change_context(CliError) - .attach_printable("failed to connect to DNA stream")?; - - let mut data_stream = stream_client - .start_stream::(configuration_stream) - .await - .change_context(CliError) - .attach_printable("failed to start DNA stream")?; - - let mut num_handled_blocks = 0; - - let mut stream: Vec = vec![]; - - let mut is_empty = true; - - loop { - tokio::select! { - maybe_message = data_stream.try_next() => { - match maybe_message.change_context(CliError)? 
{ - None => { - println!("Data stream closed"); - break; - } - Some(message) => { - if num_handled_blocks >= self.num_batches { - break; - } - match message { - DataMessage::Data { - cursor, - end_cursor, - finality, - batch, - } => { - debug!("Adding data to snapshot: {:?}-{:?}", cursor, end_cursor); - - let input = batch - .into_iter() - .map(|b| serde_json::to_value(b).change_context(CliError)) - .collect::, _>>() - .attach_printable("failed to serialize batch data")?; - - if !input.is_empty() { - is_empty = false; - } - - let output = self.script.transform(input.clone()).await.change_context(CliError).attach_printable("failed to transform batch data")?; - - stream.push(json!({ - "cursor": cursor, - "end_cursor": end_cursor, - "finality": finality, - "input": input, - "output": output, - })); - } - DataMessage::Invalidate { cursor } => { - debug!("Ignoring invalidate: {:?}", cursor); - } - DataMessage::Heartbeat => { - debug!("Ignoring heartbeat"); - } - } - num_handled_blocks += 1; - } - } - } - } - } - - if is_empty { - return Err(CliError).attach_printable("Empty snapshot, no data found for the selected options (filter, starting_block, num_batches ...)"); - } - - let stream_options = sanitize_stream_options(&self.stream_options); - - Ok(Snapshot { - script_path: self.script_path, - num_batches: self.num_batches, - stream_options, - stream_configuration_options: self.stream_configuration_options, - test_options: TestOptions::default(), - stream, - }) - } -} - -/// Remove all the fields from the stream options that are not needed for the snapshot. -/// -/// This is done to avoid leaking sensitive information (e.g. the bearer token) in the snapshots. 
-fn sanitize_stream_options(options: &StreamOptions) -> StreamOptions { - StreamOptions { - stream_url: options.stream_url.clone(), - max_message_size: options.max_message_size.clone(), - timeout_duration_seconds: options.timeout_duration_seconds, - ..Default::default() - } -} diff --git a/common/Cargo.toml b/common/Cargo.toml new file mode 100644 index 00000000..ca9a2a53 --- /dev/null +++ b/common/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "apibara-dna-common" +version = "0.0.0" +edition.workspace = true +authors.workspace = true +repository.workspace = true +license.workspace = true + +[lib] +name = "apibara_dna_common" +path = "src/lib.rs" + +[dependencies] +anyhow = "1.0.89" +apibara-etcd = { path = "../etcd" } +apibara-observability = { path = "../observability" } +apibara-dna-protocol = { path = "../protocol" } +aws-config = { version = "1.5.5", features = ["behavior-version-latest"] } +aws-sdk-s3 = "1.47.0" +bytes.workspace = true +byte-unit.workspace = true +clap.workspace = true +crc32fast = "1.4.2" +dirs.workspace = true +error-stack.workspace = true +etcd-client.workspace = true +foyer.workspace = true +futures.workspace = true +hex.workspace = true +memmap2.workspace = true +prost.workspace = true +rkyv.workspace = true +roaring.workspace = true +testcontainers.workspace = true +tokio.workspace = true +tokio-util.workspace = true +tokio-stream.workspace = true +tonic.workspace = true +tonic-health.workspace = true +tonic-reflection.workspace = true +tracing.workspace = true +zstd.workspace = true + +[dev-dependencies] +rand.workspace = true +tempfile.workspace = true +tempdir.workspace = true diff --git a/common/src/block_store.rs b/common/src/block_store.rs new file mode 100644 index 00000000..5447e359 --- /dev/null +++ b/common/src/block_store.rs @@ -0,0 +1,222 @@ +use anyhow::anyhow; +use bytes::Bytes; +use error_stack::{Result, ResultExt}; +use foyer::FetchState; + +use crate::{ + file_cache::{FileCache, FileFetch}, + fragment, + 
object_store::{GetOptions, ObjectETag, ObjectStore, PutOptions}, + segment::{SegmentGroup, SerializedSegment}, + Cursor, +}; + +static BLOCK_PREFIX: &str = "block"; +static SEGMENT_PREFIX: &str = "segment"; +static GROUP_PREFIX: &str = "group"; + +#[derive(Debug)] +pub struct BlockStoreError; + +/// Download blocks from the object store with a local cache. +#[derive(Clone)] +pub struct BlockStoreReader { + client: ObjectStore, + file_cache: FileCache, +} + +/// Upload blocks to the object store. +#[derive(Clone)] +pub struct BlockStoreWriter { + client: ObjectStore, +} + +impl BlockStoreReader { + pub fn new(client: ObjectStore, file_cache: FileCache) -> Self { + Self { client, file_cache } + } + + #[tracing::instrument(name = "block_store_get_block", skip_all, fields(cache_hit))] + pub fn get_block(&self, cursor: &Cursor) -> FileFetch { + let current_span = tracing::Span::current(); + let key = format_block_key(cursor); + + let fetch_block = { + let key = key.clone(); + move || { + let client = self.client.clone(); + async move { + match client.get(&key, GetOptions::default()).await { + Ok(response) => Ok(response.body), + Err(err) => Err(anyhow!(err)), + } + } + } + }; + let entry = self.file_cache.fetch(key, fetch_block); + + match entry.state() { + FetchState::Miss => current_span.record("cache_hit", 0), + _ => current_span.record("cache_hit", 1), + }; + + entry + } + + pub fn get_index_segment(&self, first_cursor: &Cursor) -> FileFetch { + self.get_segment(first_cursor, "index") + } + + #[tracing::instrument(name = "block_store_get_segment", skip_all, fields(name, cache_hit))] + pub fn get_segment(&self, first_cursor: &Cursor, name: impl Into) -> FileFetch { + let current_span = tracing::Span::current(); + let name = name.into(); + let key = format_segment_key(first_cursor, &name); + + current_span.record("name", &name); + + let fetch_segment = { + let key = key.clone(); + move || { + let client = self.client.clone(); + async move { + match client.get(&key, 
GetOptions::default()).await { + Ok(response) => Ok(response.body), + Err(err) => Err(anyhow!(err)), + } + } + } + }; + + let entry = self.file_cache.fetch(key, fetch_segment); + + match entry.state() { + FetchState::Miss => current_span.record("cache_hit", 0), + _ => current_span.record("cache_hit", 1), + }; + + entry + } + + #[tracing::instrument(name = "block_store_get_group", skip_all, fields(cache_hit))] + pub fn get_group(&self, cursor: &Cursor) -> FileFetch { + let current_span = tracing::Span::current(); + let key = format_group_key(cursor); + + let fetch_group = { + let key = key.clone(); + move || { + let client = self.client.clone(); + async move { + match client.get(&key, GetOptions::default()).await { + Ok(response) => Ok(response.body), + Err(err) => Err(anyhow!(err)), + } + } + } + }; + let entry = self.file_cache.fetch(key, fetch_group); + + match entry.state() { + FetchState::Miss => current_span.record("cache_hit", 0), + _ => current_span.record("cache_hit", 1), + }; + + entry + } +} + +impl BlockStoreWriter { + pub fn new(client: ObjectStore) -> Self { + Self { client } + } + + pub async fn put_block( + &self, + cursor: &Cursor, + block: &fragment::Block, + ) -> Result { + let serialized = rkyv::to_bytes::(block) + .change_context(BlockStoreError) + .attach_printable("failed to serialize block")?; + + let bytes = Bytes::copy_from_slice(serialized.as_slice()); + + let response = self + .client + .put(&format_block_key(cursor), bytes, PutOptions::default()) + .await + .change_context(BlockStoreError) + .attach_printable("failed to put block") + .attach_printable_lazy(|| format!("cursor: {}", cursor))?; + + Ok(response.etag) + } + + pub async fn put_segment( + &self, + first_cursor: &Cursor, + segment: SerializedSegment, + ) -> Result { + let response = self + .client + .put( + &format_segment_key(first_cursor, &segment.name), + segment.data, + PutOptions::default(), + ) + .await + .change_context(BlockStoreError) + .attach_printable("failed to put 
segment") + .attach_printable_lazy(|| format!("cursor: {}", first_cursor)) + .attach_printable_lazy(|| format!("segment name: {}", segment.name))?; + + Ok(response.etag) + } + + pub async fn put_group( + &self, + first_cursor: &Cursor, + group: &SegmentGroup, + ) -> Result { + let serialized = rkyv::to_bytes::(group) + .change_context(BlockStoreError) + .attach_printable("failed to serialize segment group")?; + + let bytes = Bytes::copy_from_slice(serialized.as_slice()); + + let response = self + .client + .put( + &format_group_key(first_cursor), + bytes, + PutOptions::default(), + ) + .await + .change_context(BlockStoreError) + .attach_printable("failed to put segment group") + .attach_printable_lazy(|| format!("cursor: {}", first_cursor))?; + + Ok(response.etag) + } +} + +fn format_block_key(cursor: &Cursor) -> String { + format!("{}/{:0>10}/{}", BLOCK_PREFIX, cursor.number, cursor.hash) +} + +fn format_segment_key(first_block: &Cursor, name: &str) -> String { + format!("{}/{:0>10}/{}", SEGMENT_PREFIX, first_block.number, name) +} + +fn format_group_key(first_block: &Cursor) -> String { + format!("{}/{:0>10}/index", GROUP_PREFIX, first_block.number) +} + +impl error_stack::Context for BlockStoreError {} + +impl std::fmt::Display for BlockStoreError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "block store error") + } +} diff --git a/common/src/chain.rs b/common/src/chain.rs new file mode 100644 index 00000000..a0a8cf86 --- /dev/null +++ b/common/src/chain.rs @@ -0,0 +1,733 @@ +use std::collections::BTreeMap; + +use error_stack::{Result, ResultExt}; +use rkyv::{with::AsVec, Archive, Deserialize, Serialize}; + +use crate::{Cursor, Hash}; + +#[derive(Clone, PartialEq, Eq, Hash, Archive, Serialize, Deserialize, Debug)] +pub struct BlockInfo { + pub number: u64, + pub hash: Hash, + pub parent: Hash, +} + +impl BlockInfo { + pub fn cursor(&self) -> Cursor { + Cursor { + number: self.number, + hash: self.hash.clone(), + } + } +} + 
+/// What action to take on reconnection. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ReconnectAction { + /// Continue from the provided cursor. + Continue, + /// There was a reorg while offline. The new head is provided. + OfflineReorg(Cursor), + /// The provided cursor is not part of the canonical chain or any reorg. + Unknown, +} + +pub type ReorgMap = BTreeMap; + +#[derive(Clone, PartialEq, Eq, Hash, Archive, Serialize, Deserialize)] +pub struct CanonicalBlock { + pub hash: Hash, + #[rkyv(with = AsVec)] + pub reorgs: ReorgMap, +} + +#[derive(Clone, PartialEq, Eq, Hash, Archive, Serialize, Deserialize)] +pub struct ExtraReorg { + pub block_number: u64, + #[rkyv(with = AsVec)] + pub reorgs: ReorgMap, +} + +#[derive(Clone, PartialEq, Eq, Hash, Archive, Serialize, Deserialize, Debug)] +pub struct CanonicalChainSegmentInfo { + pub first_block: Cursor, + pub last_block: Cursor, +} + +#[derive(Clone, PartialEq, Eq, Hash, Archive, Serialize, Deserialize)] +pub struct CanonicalChainSegment { + pub previous_segment: Option, + pub info: CanonicalChainSegmentInfo, + pub canonical: Vec, + pub extra_reorgs: Vec, +} + +#[derive(Debug)] +pub enum CanonicalChainError { + Builder, + View, +} + +#[derive(Clone)] +pub enum CanonicalChainBuilder { + Empty, + Building { + previous_segment: Option, + info: CanonicalChainSegmentInfo, + canonical: Vec, + reorgs: BTreeMap, + }, +} + +impl CanonicalChainBuilder { + pub fn new() -> Self { + CanonicalChainBuilder::Empty + } + + pub fn restore_from_segment( + segment: CanonicalChainSegment, + ) -> Result { + let previous_segment = segment.previous_segment; + let info = segment.info; + let mut canonical = Vec::with_capacity(segment.canonical.len()); + let mut reorgs: BTreeMap = BTreeMap::new(); + for (offset, canonical_block) in segment.canonical.into_iter().enumerate() { + let block_number = info.first_block.number + offset as u64; + + canonical.push(canonical_block.hash); + reorgs.insert(block_number, canonical_block.reorgs); + } + + 
Ok(Self::Building { + previous_segment, + info, + canonical, + reorgs, + }) + } + + pub fn info(&self) -> Option<&CanonicalChainSegmentInfo> { + match self { + CanonicalChainBuilder::Empty => None, + CanonicalChainBuilder::Building { info, .. } => Some(info), + } + } + + /// Returns the number of blocks in the segment. + pub fn segment_size(&self) -> usize { + match self { + CanonicalChainBuilder::Empty => 0, + CanonicalChainBuilder::Building { canonical, .. } => canonical.len(), + } + } + + pub fn can_grow(&self, block: &BlockInfo) -> bool { + match self { + CanonicalChainBuilder::Empty => true, + CanonicalChainBuilder::Building { info, .. } => { + let last_block = &info.last_block; + if last_block.hash.is_zero() { + true + } else { + last_block.number + 1 == block.number && last_block.hash == block.parent + } + } + } + } + + /// Add the given block to the segment. + pub fn grow(&mut self, block: BlockInfo) -> Result<(), CanonicalChainError> { + if !self.can_grow(&block) { + return Err(CanonicalChainError::Builder) + .attach_printable("block cannot be applied to the current segment"); + } + + match self { + CanonicalChainBuilder::Empty => { + // Initialize the segment builder. + let cursor = block.cursor(); + + *self = CanonicalChainBuilder::Building { + previous_segment: None, + info: CanonicalChainSegmentInfo { + first_block: cursor.clone(), + last_block: cursor, + }, + canonical: vec![block.hash], + reorgs: BTreeMap::new(), + }; + + Ok(()) + } + CanonicalChainBuilder::Building { + canonical, info, .. + } => { + info.last_block = block.cursor(); + canonical.push(block.hash); + + Ok(()) + } + } + } + + // Shrink the current segment to the given block. + // + // Returns the removed blocks. + // Notice that by design the genesis block cannot be removed. + pub fn shrink(&mut self, new_head: Cursor) -> Result, CanonicalChainError> { + let CanonicalChainBuilder::Building { + canonical, + info, + reorgs, + .. 
+ } = self + else { + return Err(CanonicalChainError::Builder) + .attach_printable("tried to shrink an empty segment"); + }; + + if new_head.number < info.first_block.number { + return Err(CanonicalChainError::Builder) + .attach_printable("tried to shrink a segment to a block that is not in the segment") + .attach_printable_lazy(|| { + format!("first block number: {}", info.first_block.number) + }) + .attach_printable_lazy(|| format!("new head number: {}", new_head.number)); + } + + if new_head.number > info.last_block.number { + return Err(CanonicalChainError::Builder) + .attach_printable("tried to shrink a segment to a block that is not ingested yet") + .attach_printable_lazy(|| format!("last block number: {}", info.last_block.number)) + .attach_printable_lazy(|| format!("new head number: {}", new_head.number)); + } + + let new_head_index = (new_head.number - info.first_block.number) as usize; + + if new_head_index >= canonical.len() || canonical[new_head_index] != new_head.hash { + return Err(CanonicalChainError::Builder) + .attach_printable("inconsistent state: tried to shrink a segment to a block that is not in the segment"); + } + + // Nothing to remove. + if new_head_index == canonical.len() - 1 { + return Ok(Vec::new()); + } + + let mut removed = Vec::new(); + let first_removed_block_index = new_head_index + 1; + + for (offset, hash) in canonical[first_removed_block_index..].iter().enumerate() { + let block_number = + info.first_block.number + (first_removed_block_index + offset) as u64; + + removed.push(Cursor { + number: block_number, + hash: hash.clone(), + }); + + reorgs + .entry(block_number) + .or_default() + .insert(hash.clone(), new_head.clone()); + } + + info.last_block = new_head.clone(); + canonical.truncate(new_head_index + 1); + + // Sanity check. + assert!(canonical.len() == (info.last_block.number - info.first_block.number + 1) as usize); + + Ok(removed) + } + + // Returns the current builder's state ready for serialization. 
+ pub fn current_segment(&self) -> Result { + let CanonicalChainBuilder::Building { + canonical, + info, + reorgs, + previous_segment, + } = self + else { + return Err(CanonicalChainError::Builder) + .attach_printable("tried to take an empty segment"); + }; + + let mut segment_canonical = Vec::with_capacity(canonical.len()); + + let starting_block_number = info.first_block.number; + + for (offset, hash) in canonical.iter().enumerate() { + let cursor = Cursor { + number: starting_block_number + offset as u64, + hash: hash.clone(), + }; + + let reorgs_at_block = reorgs.get(&cursor.number).cloned().unwrap_or_default(); + + segment_canonical.push(CanonicalBlock { + hash: hash.clone(), + reorgs: reorgs_at_block, + }); + } + + let extra_reorgs = reorgs + .iter() + .flat_map(|(block_number, reorg)| { + if *block_number > info.last_block.number { + Some(ExtraReorg { + block_number: *block_number, + reorgs: reorg.clone(), + }) + } else { + None + } + }) + .collect(); + + Ok(CanonicalChainSegment { + previous_segment: previous_segment.clone(), + info: info.clone(), + canonical: segment_canonical, + extra_reorgs, + }) + } + + /// Take the first `size` blocks from the current segment. 
+ pub fn take_segment( + &mut self, + size: usize, + ) -> Result { + let CanonicalChainBuilder::Building { + canonical, + info, + reorgs, + previous_segment, + } = self + else { + return Err(CanonicalChainError::Builder) + .attach_printable("tried to take an empty segment"); + }; + + if info.last_block.number - info.first_block.number < size as u64 { + return Err(CanonicalChainError::Builder) + .attach_printable("tried to take a segment that is too small"); + } + + let segment_last_block_cursor = { + let hash = canonical[size - 1].clone(); + Cursor { + number: info.first_block.number + size as u64 - 1, + hash, + } + }; + + let mut segment_canonical = Vec::with_capacity(size); + let starting_block_number = info.first_block.number; + for (offset, hash) in canonical.drain(..size).enumerate() { + let cursor = Cursor { + number: starting_block_number + offset as u64, + hash: hash.clone(), + }; + + let reorgs_at_block = reorgs.remove(&cursor.number).unwrap_or_default(); + + segment_canonical.push(CanonicalBlock { + hash, + reorgs: reorgs_at_block, + }); + } + + let segment_info = CanonicalChainSegmentInfo { + first_block: info.first_block.clone(), + last_block: segment_last_block_cursor, + }; + + let segment_previous_segment = previous_segment.clone(); + + *previous_segment = Some(segment_info.clone()); + + info.first_block.number += size as u64; + info.first_block.hash = canonical[0].clone(); + + Ok(CanonicalChainSegment { + previous_segment: segment_previous_segment, + info: segment_info, + canonical: segment_canonical, + extra_reorgs: Vec::new(), + }) + } +} + +impl CanonicalChainSegment { + pub fn canonical(&self, block_number: u64) -> Result { + if block_number < self.info.first_block.number { + return Err(CanonicalChainError::View) + .attach_printable("block number is before the first block") + .attach_printable_lazy(|| format!("block number: {}", block_number)) + .attach_printable_lazy(|| format!("first block: {:?}", self.info.first_block)); + } + + if 
block_number > self.info.last_block.number { + return Err(CanonicalChainError::View) + .attach_printable("block number is after the last block") + .attach_printable_lazy(|| format!("block number: {}", block_number)) + .attach_printable_lazy(|| format!("last block: {:?}", self.info.last_block)); + } + + let offset = block_number - self.info.first_block.number; + + let canonical = &self.canonical[offset as usize]; + let cursor = Cursor::new(block_number, canonical.hash.clone()); + + Ok(cursor) + } + + pub fn reconnect(&self, cursor: &Cursor) -> Result { + if cursor.number < self.info.first_block.number { + return Err(CanonicalChainError::View) + .attach_printable("cursor is before the first block") + .attach_printable_lazy(|| format!("cursor: {cursor:?}")) + .attach_printable_lazy(|| format!("first block: {:?}", self.info.first_block)); + } + + if cursor.number > self.info.last_block.number { + // The block could have been reorged while the chain shrunk. + let Some(reorgs) = self + .extra_reorgs + .iter() + .find(|r| r.block_number == cursor.number) + else { + return Err(CanonicalChainError::View) + .attach_printable("cursor is after the last block") + .attach_printable_lazy(|| format!("cursor: {cursor:?}")) + .attach_printable_lazy(|| format!("last block: {:?}", self.info.last_block)); + }; + + let Some(reorg_target) = reorgs.reorgs.get(&cursor.hash).cloned() else { + return Ok(ReconnectAction::Unknown); + }; + + return Ok(ReconnectAction::OfflineReorg(reorg_target)); + } + + let offset = cursor.number - self.info.first_block.number; + + let canonical = &self.canonical[offset as usize]; + + if canonical.hash == cursor.hash || cursor.hash.is_zero() { + return Ok(ReconnectAction::Continue); + } + + let Some(reorg_target) = canonical.reorgs.get(&cursor.hash).cloned() else { + return Ok(ReconnectAction::Unknown); + }; + + Ok(ReconnectAction::OfflineReorg(reorg_target)) + } +} + +impl Default for CanonicalChainBuilder { + fn default() -> Self { + Self::new() + } +} + 
+impl error_stack::Context for CanonicalChainError {} + +impl std::fmt::Display for CanonicalChainError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CanonicalChainError::Builder => write!(f, "canonical chain builder error"), + CanonicalChainError::View => write!(f, "canonical chain view error"), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{new_test_cursor, Hash}; + + use super::{BlockInfo, CanonicalChainBuilder, ReconnectAction}; + + fn genesis_block(chain: u8) -> BlockInfo { + let c = new_test_cursor(1_000, chain); + BlockInfo { + number: c.number, + hash: c.hash, + parent: Hash::default(), + } + } + + fn next_block(block: &BlockInfo, chain: u8) -> BlockInfo { + let c = new_test_cursor(block.number + 1, chain); + BlockInfo { + number: c.number, + hash: c.hash, + parent: block.hash.clone(), + } + } + + /* + * + * 1_006/1 1_040/1 + * o - - - - - o + * / + * o - - - - o - - - - o + * 1_000/0 1_005/0 1_010/0 + */ + #[test] + fn test_canonical_chain_builder() { + let mut builder = CanonicalChainBuilder::new(); + + let mut block = genesis_block(0); + builder.grow(block.clone()).unwrap(); + + for _ in 0..5 { + block = next_block(&block, 0); + builder.grow(block.clone()).unwrap(); + } + + let checkpoint = block.clone(); + + for _ in 0..5 { + block = next_block(&block, 0); + builder.grow(block.clone()).unwrap(); + } + + assert_eq!(builder.segment_size(), 11); + + // Can't shrink to a block that is not in the segment. + assert!(builder.shrink(new_test_cursor(999, 0)).is_err()); + assert!(builder.shrink(new_test_cursor(1_011, 0)).is_err()); + + builder.shrink(checkpoint.cursor()).unwrap(); + assert_eq!(builder.segment_size(), 6); + + // Can't grow to a block if the head has been reorged. 
+ block = next_block(&block, 0); + assert!(builder.grow(block.clone()).is_err()); + + block = checkpoint.clone(); + for _ in 0..35 { + block = next_block(&block, 1); + builder.grow(block.clone()).unwrap(); + } + + assert_eq!(builder.segment_size(), 41); + + { + let segment = builder.current_segment().unwrap(); + assert!(segment.previous_segment.is_none()); + assert_eq!(segment.info.first_block, new_test_cursor(1_000, 0)); + assert_eq!(segment.info.last_block, new_test_cursor(1_040, 1)); + assert_eq!(segment.canonical.len(), 41); + + for (offset, canon) in segment.canonical.iter().enumerate() { + let block_number = 1_000 + offset as u64; + if offset < 6 { + assert_eq!(canon.hash, new_test_cursor(block_number, 0).hash); + assert!(canon.reorgs.is_empty()); + } else { + if offset < 11 { + let old_cursor = new_test_cursor(block_number, 0); + let reorg_target = canon.reorgs.get(&old_cursor.hash).unwrap(); + assert_eq!(*reorg_target, checkpoint.cursor()); + } else { + assert!(canon.reorgs.is_empty()); + } + assert_eq!(canon.hash, new_test_cursor(block_number, 1).hash); + } + } + + let action = segment.reconnect(&new_test_cursor(1_005, 0)).unwrap(); + assert_eq!(action, ReconnectAction::Continue); + + let action = segment.reconnect(&new_test_cursor(1_006, 1)).unwrap(); + assert_eq!(action, ReconnectAction::Continue); + + let action = segment.reconnect(&new_test_cursor(1_006, 0)).unwrap(); + assert_eq!( + action, + ReconnectAction::OfflineReorg(new_test_cursor(1_005, 0)) + ); + } + + { + let segment = builder.take_segment(25).unwrap(); + assert!(segment.previous_segment.is_none()); + assert_eq!(segment.info.first_block, new_test_cursor(1_000, 0)); + assert_eq!(segment.info.last_block, new_test_cursor(1_024, 1)); + assert_eq!(segment.canonical.len(), 25); + + let segment = builder.current_segment().unwrap(); + let previous = segment.previous_segment.unwrap(); + + assert_eq!(previous.first_block, new_test_cursor(1_000, 0)); + assert_eq!(previous.last_block, 
new_test_cursor(1_024, 1)); + assert_eq!(segment.info.first_block, new_test_cursor(1_025, 1)); + assert_eq!(segment.info.last_block, new_test_cursor(1_040, 1)); + assert_eq!(segment.canonical.len(), 16); + } + } + + /* + * + * 1_004/2 1_013/2 + * o - - - - - o + * / 1_006/1 1_007/1 + * / o - - - o + * / / + * o - - - - o - - - - o - - - - o + * 1_000/0 1_003/0 1_005/0 1_010/0 + */ + #[test] + fn test_reorg_on_top_of_reorg() { + let mut builder = CanonicalChainBuilder::new(); + + let mut block = genesis_block(0); + builder.grow(block.clone()).unwrap(); + + for _ in 0..3 { + block = next_block(&block, 0); + builder.grow(block.clone()).unwrap(); + } + + let first_checkpoint = block.clone(); + assert_eq!(first_checkpoint.cursor(), new_test_cursor(1_003, 0)); + + for _ in 0..2 { + block = next_block(&block, 0); + builder.grow(block.clone()).unwrap(); + } + + let second_checkpoint = block.clone(); + assert_eq!(second_checkpoint.cursor(), new_test_cursor(1_005, 0)); + + for _ in 0..5 { + block = next_block(&block, 0); + builder.grow(block.clone()).unwrap(); + } + + { + let segment = builder.current_segment().unwrap(); + assert!(segment.previous_segment.is_none()); + assert_eq!(segment.info.first_block, new_test_cursor(1_000, 0)); + assert_eq!(segment.info.last_block, new_test_cursor(1_010, 0)); + } + + builder.shrink(second_checkpoint.cursor()).unwrap(); + + block = second_checkpoint.clone(); + for _ in 0..2 { + block = next_block(&block, 1); + builder.grow(block.clone()).unwrap(); + } + + { + let segment = builder.current_segment().unwrap(); + assert!(segment.previous_segment.is_none()); + assert_eq!(segment.info.first_block, new_test_cursor(1_000, 0)); + assert_eq!(segment.info.last_block, new_test_cursor(1_007, 1)); + + for i in 0..6 { + let block_number = 1_000 + i as u64; + let canon = &segment.canonical[i]; + assert_eq!(canon.hash, new_test_cursor(block_number, 0).hash); + assert!(canon.reorgs.is_empty()); + } + + for i in 6..8 { + let block_number = 1_000 + i as 
u64; + let canon = &segment.canonical[i]; + assert_eq!(canon.hash, new_test_cursor(block_number, 1).hash); + assert_eq!(canon.reorgs.len(), 1); + let old_block = new_test_cursor(block_number, 0); + let target = canon.reorgs.get(&old_block.hash).unwrap(); + assert_eq!(*target, second_checkpoint.cursor()); + } + } + + builder.shrink(first_checkpoint.cursor()).unwrap(); + + { + let segment = builder.current_segment().unwrap(); + let action = segment.reconnect(&new_test_cursor(1_010, 0)).unwrap(); + assert_eq!( + action, + ReconnectAction::OfflineReorg(second_checkpoint.cursor()) + ); + } + + block = first_checkpoint.clone(); + for _ in 0..10 { + block = next_block(&block, 2); + builder.grow(block.clone()).unwrap(); + } + + { + let segment = builder.current_segment().unwrap(); + assert!(segment.previous_segment.is_none()); + assert_eq!(segment.info.first_block, new_test_cursor(1_000, 0)); + assert_eq!(segment.info.last_block, new_test_cursor(1_013, 2)); + + // Before the first checkpoint. + for i in 0..4 { + let block_number = 1_000 + i as u64; + let canon = &segment.canonical[i]; + assert_eq!(canon.hash, new_test_cursor(block_number, 0).hash); + assert!(canon.reorgs.is_empty()); + } + + // Between the first and second checkpoints. + // These blocks have been removed by the second reorg. + for i in 4..6 { + let block_number = 1_000 + i as u64; + let canon = &segment.canonical[i]; + assert_eq!(canon.hash, new_test_cursor(block_number, 2).hash); + assert_eq!(canon.reorgs.len(), 1); + let old_block = new_test_cursor(block_number, 0); + let target = canon.reorgs.get(&old_block.hash).unwrap(); + assert_eq!(*target, first_checkpoint.cursor()); + } + + // After the second checkpoint. + // These blocks have been removed by the first and second reorg. 
+ for i in 6..8 { + let block_number = 1_000 + i as u64; + let canon = &segment.canonical[i]; + assert_eq!(canon.hash, new_test_cursor(block_number, 2).hash); + assert_eq!(canon.reorgs.len(), 2); + { + let old_block = new_test_cursor(block_number, 0); + let target = canon.reorgs.get(&old_block.hash).unwrap(); + assert_eq!(*target, second_checkpoint.cursor()); + } + { + let old_block = new_test_cursor(block_number, 1); + let target = canon.reorgs.get(&old_block.hash).unwrap(); + assert_eq!(*target, first_checkpoint.cursor()); + } + } + + // These blocks have been removed by the first reorg. + for i in 8..11 { + let block_number = 1_000 + i as u64; + let canon = &segment.canonical[i]; + assert_eq!(canon.hash, new_test_cursor(block_number, 2).hash); + let old_block = new_test_cursor(block_number, 0); + let target = canon.reorgs.get(&old_block.hash).unwrap(); + assert_eq!(*target, second_checkpoint.cursor()); + } + + // These blocks have never been part of a reorg. + for i in 11..14 { + let block_number = 1_000 + i as u64; + let canon = &segment.canonical[i]; + assert_eq!(canon.hash, new_test_cursor(block_number, 2).hash); + assert!(canon.reorgs.is_empty()); + } + } + } +} diff --git a/common/src/chain_store.rs b/common/src/chain_store.rs new file mode 100644 index 00000000..47af864e --- /dev/null +++ b/common/src/chain_store.rs @@ -0,0 +1,156 @@ +use bytes::Bytes; +use error_stack::{Result, ResultExt}; + +use crate::{ + chain::CanonicalChainSegment, + file_cache::{FileCache, FileCacheError}, + object_store::{GetOptions, ObjectETag, ObjectStore, ObjectStoreResultExt, PutOptions}, +}; + +static CANONICAL_PREFIX: &str = "canon"; +static RECENT_CHAIN_SEGMENT_NAME: &str = "recent"; + +#[derive(Debug)] +pub struct ChainStoreError; + +#[derive(Clone)] +pub struct ChainStore { + cache: FileCache, + client: ObjectStore, +} + +impl ChainStore { + pub fn new(client: ObjectStore, cache: FileCache) -> Self { + Self { client, cache } + } + + pub async fn get( + &self, + 
first_block_number: u64, + ) -> Result, ChainStoreError> { + let filename = self.segment_filename(first_block_number); + self.get_impl(&filename, None, false).await + } + + pub async fn put( + &self, + segment: &CanonicalChainSegment, + ) -> Result { + let filename = self.segment_filename(segment.info.first_block.number); + self.put_impl(&filename, segment).await + } + + pub async fn put_recent( + &self, + segment: &CanonicalChainSegment, + ) -> Result { + self.put_impl(RECENT_CHAIN_SEGMENT_NAME, segment).await + } + + pub async fn get_recent( + &self, + etag: Option, + ) -> Result, ChainStoreError> { + self.get_impl(RECENT_CHAIN_SEGMENT_NAME, etag, true).await + } + + async fn put_impl( + &self, + name: &str, + segment: &CanonicalChainSegment, + ) -> Result { + let serialized = rkyv::to_bytes::(segment) + .change_context(ChainStoreError) + .attach_printable("failed to serialize chain segment")?; + + let bytes = Bytes::copy_from_slice(serialized.as_slice()); + + let response = self + .client + .put(&self.format_key(name), bytes, PutOptions::default()) + .await + .change_context(ChainStoreError) + .attach_printable("failed to put chain segment") + .attach_printable_lazy(|| format!("name: {}", name))?; + + Ok(response.etag) + } + + async fn get_impl( + &self, + name: &str, + etag: Option, + skip_cache: bool, + ) -> Result, ChainStoreError> { + let key = self.format_key(name); + + if skip_cache { + let Some(bytes) = self.get_as_bytes(&key, etag).await? else { + return Ok(None); + }; + + let segment = rkyv::from_bytes::<_, rkyv::rancor::Error>(&bytes) + .change_context(ChainStoreError) + .attach_printable("failed to deserialize chain segment") + .attach_printable_lazy(|| format!("name: {}", name))?; + + return Ok(Some(segment)); + } + + if let Some(existing) = self + .cache + .get(&key) + .await + .map_err(FileCacheError::Foyer) + .change_context(ChainStoreError)? 
+ { + let segment = rkyv::from_bytes::<_, rkyv::rancor::Error>(existing.value()) + .change_context(ChainStoreError) + .attach_printable("failed to deserialize chain segment") + .attach_printable_lazy(|| format!("name: {}", name))?; + + Ok(Some(segment)) + } else { + let Some(bytes) = self.get_as_bytes(&key, etag).await? else { + return Ok(None); + }; + + let entry = self.cache.insert(key, bytes); + + let segment = rkyv::from_bytes::<_, rkyv::rancor::Error>(entry.value()) + .change_context(ChainStoreError) + .attach_printable("failed to deserialize chain segment") + .attach_printable_lazy(|| format!("name: {}", name))?; + + Ok(Some(segment)) + } + } + + async fn get_as_bytes( + &self, + key: &str, + etag: Option, + ) -> Result, ChainStoreError> { + match self.client.get(key, GetOptions { etag }).await { + Ok(response) => Ok(Some(response.body)), + Err(err) if err.is_not_found() => Ok(None), + Err(err) => Err(err).change_context(ChainStoreError), + } + } + + fn format_key(&self, key: &str) -> String { + format!("{}/{}", CANONICAL_PREFIX, key) + } + + fn segment_filename(&self, first_block: u64) -> String { + format!("z-{:0>10}", first_block) + } +} + +impl error_stack::Context for ChainStoreError {} + +impl std::fmt::Display for ChainStoreError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "chain store error") + } +} diff --git a/common/src/chain_view/error.rs b/common/src/chain_view/error.rs new file mode 100644 index 00000000..acfc950d --- /dev/null +++ b/common/src/chain_view/error.rs @@ -0,0 +1,10 @@ +#[derive(Debug)] +pub struct ChainViewError; + +impl error_stack::Context for ChainViewError {} + +impl std::fmt::Display for ChainViewError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "chain view error") + } +} diff --git a/common/src/chain_view/full.rs b/common/src/chain_view/full.rs new file mode 100644 index 00000000..d0f1ff6c --- /dev/null +++ b/common/src/chain_view/full.rs @@ -0,0 
+1,150 @@ +use error_stack::{Result, ResultExt}; +use tracing::debug; + +use crate::chain::{CanonicalChainSegment, ReconnectAction}; +use crate::chain_store::ChainStore; +use crate::Cursor; + +use super::ChainViewError; + +pub enum CanonicalCursor { + BeforeAvailable(Cursor), + AfterAvailable(Cursor), + Canonical(Cursor), +} + +#[derive(Debug, Clone)] +pub enum NextCursor { + /// Continue streaming from the given cursor. + Continue(Cursor), + /// Reorg to the given cursor. + Invalidate(Cursor), + /// Nothing to do. + AtHead, +} + +pub struct FullCanonicalChain { + store: ChainStore, + pub(crate) starting_block: u64, + chain_segment_size: usize, + recent: CanonicalChainSegment, +} + +impl FullCanonicalChain { + pub async fn initialize( + store: ChainStore, + starting_block: u64, + chain_segment_size: usize, + ) -> Result { + let recent = store + .get_recent(None) + .await + .change_context(ChainViewError) + .attach_printable("failed to get recent canonical chain segment")? + .ok_or(ChainViewError) + .attach_printable("recent canonical chain segment not found")?; + + Ok(Self { + store, + starting_block, + chain_segment_size, + recent, + }) + } + + pub async fn get_next_cursor( + &self, + cursor: &Option, + ) -> Result { + let Some(cursor) = cursor else { + let first_available = self.get_canonical_impl(self.starting_block).await?; + return Ok(NextCursor::Continue(first_available)); + }; + + let segment = self.get_chain_segment(cursor.number).await?; + + match segment.reconnect(cursor).change_context(ChainViewError)? 
{ + ReconnectAction::Continue => { + if cursor.number == self.recent.info.last_block.number { + return Ok(NextCursor::AtHead); + } + let next_available = self.get_canonical_impl(cursor.number + 1).await?; + Ok(NextCursor::Continue(next_available)) + } + ReconnectAction::OfflineReorg(target) => Ok(NextCursor::Invalidate(target)), + ReconnectAction::Unknown => Err(ChainViewError).attach_printable("unknown cursor"), + } + } + + pub async fn get_head(&self) -> Result { + Ok(self.recent.info.last_block.clone()) + } + + pub async fn get_canonical( + &self, + block_number: u64, + ) -> Result { + if block_number > self.recent.info.last_block.number { + return Ok(CanonicalCursor::AfterAvailable( + self.recent.info.last_block.clone(), + )); + } + + if block_number < self.starting_block { + let first_available = self.get_canonical_impl(self.starting_block).await?; + return Ok(CanonicalCursor::BeforeAvailable(first_available)); + } + + let cursor = self.get_canonical_impl(block_number).await?; + Ok(CanonicalCursor::Canonical(cursor)) + } + + pub async fn refresh_recent(&mut self) -> Result<(), ChainViewError> { + debug!("refreshing recent canonical chain segment"); + + let Ok(Some(recent)) = self.store.get_recent(None).await else { + return Ok(()); + }; + + self.recent = recent; + + Ok(()) + } + + async fn get_chain_segment( + &self, + block_number: u64, + ) -> Result { + if self.recent.info.first_block.number <= block_number { + return Ok(self.recent.clone()); + } + + let chain_segment_start = + chain_segment_start(block_number, self.starting_block, self.chain_segment_size); + + let segment = self + .store + .get(chain_segment_start) + .await + .change_context(ChainViewError) + .attach_printable("failed to get chain segment")? 
+ .ok_or(ChainViewError) + .attach_printable("chain segment not found")?; + + Ok(segment) + } + + async fn get_canonical_impl(&self, block_number: u64) -> Result { + let segment = self.get_chain_segment(block_number).await?; + let cursor = segment + .canonical(block_number) + .change_context(ChainViewError) + .attach_printable("failed to get canonical block")?; + Ok(cursor) + } +} + +fn chain_segment_start(block_number: u64, starting_block: u64, chain_segment_size: usize) -> u64 { + let chain_segment_size = chain_segment_size as u64; + (block_number - starting_block) / chain_segment_size * chain_segment_size + starting_block +} diff --git a/common/src/chain_view/mod.rs b/common/src/chain_view/mod.rs new file mode 100644 index 00000000..8a8e4b33 --- /dev/null +++ b/common/src/chain_view/mod.rs @@ -0,0 +1,9 @@ +mod error; +mod full; +mod sync; +mod view; + +pub use self::error::ChainViewError; +pub use self::full::{CanonicalCursor, NextCursor}; +pub use self::sync::{chain_view_sync_loop, ChainViewSyncService}; +pub use self::view::ChainView; diff --git a/common/src/chain_view/sync.rs b/common/src/chain_view/sync.rs new file mode 100644 index 00000000..a4178ab6 --- /dev/null +++ b/common/src/chain_view/sync.rs @@ -0,0 +1,224 @@ +use std::time::Duration; + +use apibara_etcd::EtcdClient; +use error_stack::{Result, ResultExt}; +use futures::TryStreamExt; +use tokio_util::sync::CancellationToken; +use tracing::{info, warn}; + +use crate::{ + chain_store::ChainStore, + file_cache::FileCache, + ingestion::{IngestionStateClient, IngestionStateUpdate}, + object_store::ObjectStore, + options_store::OptionsStore, +}; + +use super::{error::ChainViewError, full::FullCanonicalChain, view::ChainView}; + +pub struct ChainViewSyncService { + tx: tokio::sync::watch::Sender>, + etcd_client: EtcdClient, + chain_store: ChainStore, +} + +impl ChainViewSyncService { + fn new( + tx: tokio::sync::watch::Sender>, + chain_file_cache: FileCache, + etcd_client: EtcdClient, + object_store: 
ObjectStore, + ) -> Self { + let chain_store = ChainStore::new(object_store, chain_file_cache); + Self { + tx, + etcd_client, + chain_store, + } + } + + pub async fn start(self, ct: CancellationToken) -> Result<(), ChainViewError> { + info!("starting chain view sync service"); + let mut ingestion_state_client = IngestionStateClient::new(&self.etcd_client); + + let starting_block = loop { + if ct.is_cancelled() { + return Ok(()); + } + + let starting_block = ingestion_state_client + .get_starting_block() + .await + .change_context(ChainViewError)?; + + if let Some(starting_block) = starting_block { + break starting_block; + } + + info!(step = "starting_block", "waiting for ingestion to start"); + tokio::time::sleep(Duration::from_secs(10)).await; + }; + + let finalized = loop { + if ct.is_cancelled() { + return Ok(()); + } + + let finalized = ingestion_state_client + .get_finalized() + .await + .change_context(ChainViewError)?; + + if let Some(finalized) = finalized { + break finalized; + } + + info!(step = "finalized_block", "waiting for ingestion to start"); + tokio::time::sleep(Duration::from_secs(10)).await; + }; + + let segmented = ingestion_state_client + .get_segmented() + .await + .change_context(ChainViewError)?; + + let grouped = ingestion_state_client + .get_grouped() + .await + .change_context(ChainViewError)?; + + loop { + if ct.is_cancelled() { + return Ok(()); + } + + let recent = ingestion_state_client + .get_ingested() + .await + .change_context(ChainViewError)?; + + if recent.is_some() { + break; + } + + info!(step = "recent", "waiting for ingestion to start"); + tokio::time::sleep(Duration::from_secs(10)).await; + } + + if ct.is_cancelled() { + return Ok(()); + } + + let mut options_store = OptionsStore::new(&self.etcd_client); + let chain_segment_size = options_store + .get_chain_segment_size() + .await + .change_context(ChainViewError) + .attach_printable("failed to get chain segment size options")? 
+ .ok_or(ChainViewError) + .attach_printable("chain segment size option not found")?; + + let canonical_chain = FullCanonicalChain::initialize( + self.chain_store.clone(), + starting_block, + chain_segment_size, + ) + .await?; + + let segment_size = options_store + .get_segment_size() + .await + .change_context(ChainViewError) + .attach_printable("failed to get segment size options")? + .ok_or(ChainViewError) + .attach_printable("segment size option not found")?; + + let group_size = options_store + .get_group_size() + .await + .change_context(ChainViewError) + .attach_printable("failed to get group size options")? + .ok_or(ChainViewError) + .attach_printable("group size option not found")?; + + let chain_view = ChainView::new( + finalized, + segmented, + grouped, + segment_size as u64, + group_size as u64, + canonical_chain, + ); + + self.tx + .send(Some(chain_view.clone())) + .change_context(ChainViewError)?; + + info!("finished initializing chain view"); + + if ct.is_cancelled() { + return Ok(()); + } + + let state_changes = ingestion_state_client + .watch_changes(ct.clone()) + .await + .change_context(ChainViewError)?; + + tokio::pin!(state_changes); + + while let Some(update) = state_changes + .try_next() + .await + .change_context(ChainViewError)? + { + info!(update = ?update, "chain view sync update"); + match update { + IngestionStateUpdate::StartingBlock(block) => { + // The starting block should never be updated. 
+ warn!(starting_block = block, "chain view starting block updated"); + } + IngestionStateUpdate::Finalized(block) => { + chain_view.set_finalized_block(block).await; + } + IngestionStateUpdate::Segmented(block) => { + chain_view.set_segmented_block(block).await; + } + IngestionStateUpdate::Grouped(block) => { + chain_view.set_grouped_block(block).await; + } + IngestionStateUpdate::Ingested(_etag) => { + chain_view.refresh_recent().await?; + } + } + + self.tx + .send(Some(chain_view.clone())) + .change_context(ChainViewError)?; + } + + if ct.is_cancelled() { + return Ok(()); + } + + Err(ChainViewError).attach_printable("etcd sync stream ended unexpectedly") + } +} + +pub async fn chain_view_sync_loop( + chain_file_cache: FileCache, + etcd_client: EtcdClient, + object_store: ObjectStore, +) -> Result< + ( + tokio::sync::watch::Receiver>, + ChainViewSyncService, + ), + ChainViewError, +> { + let (tx, rx) = tokio::sync::watch::channel(None); + + let sync_service = ChainViewSyncService::new(tx, chain_file_cache, etcd_client, object_store); + + Ok((rx, sync_service)) +} diff --git a/common/src/chain_view/view.rs b/common/src/chain_view/view.rs new file mode 100644 index 00000000..6efc9bad --- /dev/null +++ b/common/src/chain_view/view.rs @@ -0,0 +1,256 @@ +use std::sync::Arc; + +use error_stack::Result; +use tokio::sync::{Notify, RwLock}; + +use crate::Cursor; + +use super::{ + error::ChainViewError, + full::{FullCanonicalChain, NextCursor}, + CanonicalCursor, +}; + +/// Provides a read-only view of the canonical chain. 
+#[derive(Clone)] +pub struct ChainView(Arc>); + +pub(crate) struct ChainViewInner { + finalized: u64, + segmented: Option, + grouped: Option, + canonical: FullCanonicalChain, + segment_size: u64, + group_size: u64, + head_notify: Arc, + finalized_notify: Arc, + segmented_notify: Arc, +} + +impl ChainView { + pub(crate) fn new( + finalized: u64, + segmented: Option, + grouped: Option, + segment_size: u64, + group_size: u64, + canonical: FullCanonicalChain, + ) -> Self { + let inner = ChainViewInner { + finalized, + segmented, + grouped, + canonical, + segment_size, + group_size, + head_notify: Arc::new(Notify::new()), + finalized_notify: Arc::new(Notify::new()), + segmented_notify: Arc::new(Notify::new()), + }; + + Self(Arc::new(RwLock::new(inner))) + } + + pub async fn get_segment_size(&self) -> u64 { + self.0.read().await.segment_size + } + + pub async fn get_group_size(&self) -> u64 { + self.0.read().await.group_size + } + + pub async fn get_segment_start_block(&self, block: u64) -> u64 { + let inner = self.0.read().await; + inner.get_segment_start_block(block) + } + + pub async fn get_segment_end_block(&self, block: u64) -> u64 { + let inner = self.0.read().await; + inner.get_segment_end_block(block) + } + + pub async fn has_segment_for_block(&self, block: u64) -> bool { + let inner = self.0.read().await; + inner.has_segment_for_block(block) + } + + pub async fn has_group_for_block(&self, block: u64) -> bool { + let inner = self.0.read().await; + inner.has_group_for_block(block) + } + + pub async fn get_group_start_block(&self, block: u64) -> u64 { + let inner = self.0.read().await; + inner.get_group_start_block(block) + } + + pub async fn get_group_end_block(&self, block: u64) -> u64 { + let inner = self.0.read().await; + inner.get_group_end_block(block) + } + + pub async fn get_blocks_in_group(&self) -> u64 { + let inner = self.0.read().await; + inner.group_size * inner.segment_size + } + + pub async fn get_next_cursor( + &self, + cursor: &Option, + ) -> 
Result { + let inner = self.0.read().await; + inner.canonical.get_next_cursor(cursor).await + } + + pub async fn get_canonical( + &self, + block_number: u64, + ) -> Result { + let inner = self.0.read().await; + inner.canonical.get_canonical(block_number).await + } + + pub async fn get_head(&self) -> Result { + let inner = self.0.read().await; + inner.canonical.get_head().await + } + + pub async fn head_changed(&self) { + let notify = { + let inner = self.0.read().await; + inner.head_notify.clone() + }; + notify.notified().await; + } + + pub async fn finalized_changed(&self) { + let notify = { + let inner = self.0.read().await; + inner.finalized_notify.clone() + }; + notify.notified().await; + } + + pub async fn segmented_changed(&self) { + let notify = { + let inner = self.0.read().await; + inner.segmented_notify.clone() + }; + notify.notified().await; + } + + pub async fn get_starting_cursor(&self) -> Result { + let inner = self.0.read().await; + let starting_block = inner.canonical.starting_block; + match inner.canonical.get_canonical(starting_block).await? { + CanonicalCursor::Canonical(cursor) => Ok(cursor), + _ => Ok(Cursor::new_finalized(starting_block)), + } + } + + pub async fn get_finalized_cursor(&self) -> Result { + let inner = self.0.read().await; + match inner.canonical.get_canonical(inner.finalized).await? { + CanonicalCursor::Canonical(cursor) => Ok(cursor), + _ => Ok(Cursor::new_finalized(inner.finalized)), + } + } + + pub async fn get_grouped_cursor(&self) -> Result, ChainViewError> { + let inner = self.0.read().await; + let Some(grouped) = inner.grouped else { + return Ok(None); + }; + + match inner.canonical.get_canonical(grouped).await? 
{ + CanonicalCursor::Canonical(cursor) => Ok(cursor.into()), + _ => Ok(Cursor::new_finalized(grouped).into()), + } + } + + pub async fn get_segmented_cursor(&self) -> Result, ChainViewError> { + let inner = self.0.read().await; + let Some(segmented) = inner.segmented else { + return Ok(None); + }; + + match inner.canonical.get_canonical(segmented).await? { + CanonicalCursor::Canonical(cursor) => Ok(cursor.into()), + _ => Ok(Cursor::new_finalized(segmented).into()), + } + } + + pub(crate) async fn set_finalized_block(&self, block: u64) { + let mut inner = self.0.write().await; + inner.finalized = block; + inner.finalized_notify.notify_waiters(); + } + + pub(crate) async fn set_segmented_block(&self, block: u64) { + let mut inner = self.0.write().await; + inner.segmented = Some(block); + inner.segmented_notify.notify_waiters(); + } + + pub(crate) async fn set_grouped_block(&self, block: u64) { + let mut inner = self.0.write().await; + inner.grouped = Some(block); + } + + pub(crate) async fn refresh_recent(&self) -> Result<(), ChainViewError> { + let mut inner = self.0.write().await; + + let prev_head = inner.canonical.get_head().await?; + inner.canonical.refresh_recent().await?; + let new_head = inner.canonical.get_head().await?; + + if prev_head != new_head { + inner.head_notify.notify_waiters(); + } + + Ok(()) + } +} + +impl ChainViewInner { + pub fn get_segment_start_block(&self, block: u64) -> u64 { + let starting_block = self.canonical.starting_block; + let blocks = block - starting_block; + let segment_count = blocks / self.segment_size; + starting_block + segment_count * self.segment_size + } + + pub fn get_segment_end_block(&self, block: u64) -> u64 { + self.get_segment_start_block(block) + self.segment_size - 1 + } + + pub fn has_segment_for_block(&self, block: u64) -> bool { + let Some(segmented) = self.segmented else { + return false; + }; + + let segment_end = self.get_segment_end_block(block); + segment_end <= segmented + } + + pub fn 
has_group_for_block(&self, block: u64) -> bool { + let Some(grouped) = self.grouped else { + return false; + }; + + let group_end = self.get_group_end_block(block); + group_end <= grouped + } + + pub fn get_group_end_block(&self, block: u64) -> u64 { + let blocks_in_group = self.group_size * self.segment_size; + self.get_group_start_block(block) + blocks_in_group - 1 + } + + pub fn get_group_start_block(&self, block: u64) -> u64 { + let starting_block = self.canonical.starting_block; + let blocks_in_group = self.group_size * self.segment_size; + let group_count = (block - starting_block) / blocks_in_group; + starting_block + group_count * blocks_in_group + } +} diff --git a/common/src/cli.rs b/common/src/cli.rs new file mode 100644 index 00000000..cf76c95f --- /dev/null +++ b/common/src/cli.rs @@ -0,0 +1,112 @@ +use apibara_etcd::{AuthOptions, EtcdClient, EtcdClientError, EtcdClientOptions}; +use aws_config::{meta::region::RegionProviderChain, Region}; +use clap::Args; +use error_stack::Result; + +use crate::{ + compaction::CompactionArgs, + file_cache::FileCacheArgs, + ingestion::IngestionArgs, + object_store::{ObjectStore, ObjectStoreOptions}, + server::ServerArgs, +}; + +#[derive(Args, Debug)] +pub struct StartArgs { + #[clap(flatten)] + pub object_store: ObjectStoreArgs, + #[clap(flatten)] + pub etcd: EtcdArgs, + #[clap(flatten)] + pub ingestion: IngestionArgs, + #[clap(flatten)] + pub compaction: CompactionArgs, + #[clap(flatten)] + pub server: ServerArgs, + #[clap(flatten)] + pub cache: FileCacheArgs, +} + +#[derive(Args, Clone, Debug)] +pub struct ObjectStoreArgs { + /// The S3 bucket to use. + #[arg(long = "s3.bucket", env = "DNA_S3_BUCKET")] + pub s3_bucket: String, + /// Under which prefix to store the data. + #[arg(long = "s3.prefix", env = "DNA_S3_PREFIX")] + pub s3_prefix: Option, + /// The S3 endpoint URL. + #[arg(long = "s3.endpoint", env = "DNA_S3_ENDPOINT")] + pub s3_endpoint: Option, + /// The S3 region. 
+ #[arg(long = "s3.region", env = "DNA_S3_REGION")] + pub s3_region: Option, +} + +#[derive(Args, Clone, Debug)] +pub struct EtcdArgs { + /// The etcd endpoints. + #[arg( + long = "etcd.endpoints", + env = "DNA_ETCD_ENDPOINTS", + value_delimiter = ',', + num_args = 1.., + )] + pub etcd_endpoints: Vec, + /// The etcd prefix. + #[arg(long = "etcd.prefix", env = "DNA_ETCD_PREFIX")] + pub etcd_prefix: Option, + /// The etcd username. + #[arg(long = "etcd.user", env = "DNA_ETCD_USER")] + pub etcd_user: Option, + /// The etcd password. + #[arg(long = "etcd.password", env = "DNA_ETCD_PASSWORD")] + pub etcd_password: Option, +} + +impl ObjectStoreArgs { + pub async fn into_object_store_client(self) -> ObjectStore { + let mut config = aws_config::from_env(); + + if let Some(region) = self.s3_region.as_ref() { + let region = Region::new(region.clone()); + let region = RegionProviderChain::default_provider().or_else(region.clone()); + + config = config.region(region); + } + + if let Some(endpoint_url) = self.s3_endpoint.as_ref() { + config = config.endpoint_url(endpoint_url.clone()); + } + + let sdk_config = config.load().await; + let s3_config = aws_sdk_s3::Config::from(&sdk_config) + .to_builder() + .force_path_style(true) + .build(); + + let options = ObjectStoreOptions { + bucket: self.s3_bucket, + prefix: self.s3_prefix, + }; + + ObjectStore::new_from_config(s3_config, options) + } +} + +impl EtcdArgs { + pub async fn into_etcd_client(self) -> Result { + let auth = if let (Some(user), Some(password)) = (self.etcd_user, self.etcd_password) { + Some(AuthOptions { user, password }) + } else { + None + }; + + let options = EtcdClientOptions { + prefix: self.etcd_prefix, + auth, + }; + + EtcdClient::connect(self.etcd_endpoints, options).await + } +} diff --git a/common/src/compaction/cli.rs b/common/src/compaction/cli.rs new file mode 100644 index 00000000..00805f03 --- /dev/null +++ b/common/src/compaction/cli.rs @@ -0,0 +1,31 @@ +use clap::Args; + +#[derive(Args, Debug)] 
+pub struct CompactionArgs { + /// Whether to run the compaction service. + #[clap(long = "compaction.enabled", env = "DNA_COMPACTION_ENABLED")] + pub compaction_enabled: bool, + /// How many blocks in a single segment. + #[clap( + long = "compaction.segment-size", + env = "DNA_COMPACTION_SEGMENT_SIZE", + default_value = "1000" + )] + pub compaction_segment_size: usize, + /// How many segments in a single segment group. + #[clap( + long = "compaction.group-size", + env = "DNA_COMPACTION_GROUP_SIZE", + default_value = "100" + )] + pub compaction_group_size: usize, +} + +impl CompactionArgs { + pub fn to_compaction_options(&self) -> super::CompactionServiceOptions { + super::CompactionServiceOptions { + segment_size: self.compaction_segment_size, + group_size: self.compaction_group_size, + } + } +} diff --git a/common/src/compaction/error.rs b/common/src/compaction/error.rs new file mode 100644 index 00000000..cbc87a3e --- /dev/null +++ b/common/src/compaction/error.rs @@ -0,0 +1,10 @@ +#[derive(Debug)] +pub struct CompactionError; + +impl error_stack::Context for CompactionError {} + +impl std::fmt::Display for CompactionError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "compaction error") + } +} diff --git a/common/src/compaction/group.rs b/common/src/compaction/group.rs new file mode 100644 index 00000000..d657360b --- /dev/null +++ b/common/src/compaction/group.rs @@ -0,0 +1,152 @@ +use error_stack::{Result, ResultExt}; +use tokio_util::sync::CancellationToken; +use tracing::info; + +use crate::{ + block_store::{BlockStoreReader, BlockStoreWriter}, + chain_view::{ChainView, NextCursor}, + compaction::group_builder::SegmentGroupBuilder, + file_cache::FileCacheError, + fragment::IndexGroupFragment, + ingestion::IngestionStateClient, + segment::Segment, + Cursor, +}; + +use super::CompactionError; + +pub struct SegmentGroupService { + segment_size: usize, + group_size: usize, + chain_view: ChainView, + block_store_reader: 
BlockStoreReader,
    block_store_writer: BlockStoreWriter,
    state_client: IngestionStateClient,
}

impl SegmentGroupService {
    pub fn new(
        segment_size: usize,
        group_size: usize,
        chain_view: ChainView,
        block_store_reader: BlockStoreReader,
        block_store_writer: BlockStoreWriter,
        state_client: IngestionStateClient,
    ) -> Self {
        Self {
            segment_size,
            group_size,
            chain_view,
            block_store_reader,
            block_store_writer,
            state_client,
        }
    }

    /// Runs the group-compaction loop until `ct` is cancelled.
    ///
    /// Each tick checks whether a full group of segments (`group_size *
    /// segment_size` blocks) has been segmented; if so it merges their index
    /// segments into a single `SegmentGroup` and uploads it.
    pub async fn start(mut self, ct: CancellationToken) -> Result<(), CompactionError> {
        let chain_view = self.chain_view;
        let blocks_in_group = (self.group_size * self.segment_size) as u64;

        loop {
            if ct.is_cancelled() {
                return Ok(());
            }

            // Simple polling cadence; waiting below also reacts to state changes.
            tokio::time::sleep(std::time::Duration::from_secs(1)).await;

            // First block of the next group: one past the grouped cursor, or the
            // chain's starting cursor if nothing has been grouped yet.
            let first_block_in_group = if let Some(cursor) = chain_view
                .get_grouped_cursor()
                .await
                .change_context(CompactionError)?
            {
                let NextCursor::Continue(cursor) = chain_view
                    .get_next_cursor(&Some(cursor.clone()))
                    .await
                    .change_context(CompactionError)?
                else {
                    return Err(CompactionError)
                        .attach_printable("chain view returned invalid cursor")
                        .attach_printable_lazy(|| format!("cursor: {cursor}"));
                };
                cursor
            } else {
                chain_view
                    .get_starting_cursor()
                    .await
                    .change_context(CompactionError)?
            };

            let Some(segmented) = chain_view
                .get_segmented_cursor()
                .await
                .change_context(CompactionError)?
            else {
                // Nothing segmented yet: wait for the segmented cursor to move.
                let Some(_) = ct.run_until_cancelled(chain_view.segmented_changed()).await else {
                    return Ok(());
                };
                continue;
            };

            info!(
                next_cursor = %first_block_in_group,
                blocks_in_group = %blocks_in_group,
                segmented = %segmented,
                "compaction: group tick"
            );

            if first_block_in_group.number + blocks_in_group <= segmented.number {
                info!(starting_cursor = %first_block_in_group, "creating new group");

                let mut builder = SegmentGroupBuilder::default();

                // Fold the index segment of every segment in the group into the builder.
                for i in 0..self.group_size {
                    let segment_start =
                        first_block_in_group.number + (i * self.segment_size) as u64;
                    let current_cursor = Cursor::new_finalized(segment_start);
                    let segment = self
                        .block_store_reader
                        .get_index_segment(&current_cursor)
                        .await
                        .map_err(FileCacheError::Foyer)
                        .change_context(CompactionError)?;

                    let segment = rkyv::from_bytes::<
                        Segment,
                        rkyv::rancor::Error,
                    >(segment.value())
                    .change_context(CompactionError)?;

                    builder
                        .add_segment(&segment)
                        .change_context(CompactionError)
                        .attach_printable("failed to add segment to group")?;
                }

                let group = builder.build().change_context(CompactionError)?;
                let last_block_in_group = first_block_in_group.number + blocks_in_group - 1;

                info!(
                    first_block = %first_block_in_group,
                    last_block = %last_block_in_group,
                    "uploading group to object store"
                );

                self.block_store_writer
                    .put_group(&first_block_in_group, &group)
                    .await
                    .change_context(CompactionError)?;

                self.state_client
                    .put_grouped(last_block_in_group)
                    .await
                    .change_context(CompactionError)?;
            } else {
                info!("compaction waiting for segmented change");
                let Some(_) = ct.run_until_cancelled(chain_view.segmented_changed()).await else {
                    return Ok(());
                };
            }
        }
    }
}

// diff --git a/common/src/compaction/group_builder.rs (new file, mode 100644)
use std::collections::BTreeMap;

use error_stack::{Result, ResultExt};

use crate::{
    fragment::{self, FragmentId, IndexFragment, IndexGroupFragment, IndexId},
    index,
    segment::{Segment, SegmentGroup},
    Cursor,
};

use super::CompactionError;

/// Merges the per-block indexes of multiple segments into one group-wide index.
#[derive(Debug, Default)]
pub struct SegmentGroupBuilder {
    // Cursors of the first segment added and the last segment added.
    block_range: Option<(Cursor, Cursor)>,
    // Per fragment, per index id, the builder accumulating (key -> block number).
    // NOTE(review): the map's value type was garbled in the source; reconstructed
    // from usage (`or_default()` + `insert(key, block_number)` + `build()`) as a
    // bitmap index builder from the `index` module — TODO confirm.
    block_indexes: BTreeMap<FragmentId, BTreeMap<IndexId, index::BitmapIndexBuilder>>,
}

impl SegmentGroupBuilder {
    /// Adds one segment's index data to the group being built.
    pub fn add_segment(&mut self, segment: &Segment) -> Result<(), CompactionError> {
        let segment_cursor = segment.first_block.clone();

        // Track the first segment seen and keep updating the last one.
        self.block_range = match self.block_range.take() {
            None => Some((segment_cursor.clone(), segment_cursor)),
            Some((first_block, _)) => Some((first_block, segment_cursor)),
        };

        for block_data in segment.data.iter() {
            let cursor: Cursor = block_data.cursor.clone();
            // NOTE(review): block numbers are narrowed to u32 here — assumes they
            // fit; confirm upstream guarantees.
            let block_number = cursor.number as u32;

            for index_fragment in block_data.data.indexes.iter() {
                let block_index_fragment = self
                    .block_indexes
                    .entry(index_fragment.fragment_id)
                    .or_default();

                for index in index_fragment.indexes.iter() {
                    let block_index = block_index_fragment.entry(index.index_id).or_default();
                    match &index.index {
                        index::Index::Bitmap(bitmap_index) => {
                            for key in bitmap_index.keys() {
                                block_index.insert(key.clone(), block_number);
                            }
                        }
                    }
                }
            }
        }

        Ok(())
    }

    /// Consumes the builder and produces the final `SegmentGroup`.
    pub fn build(self) -> Result<SegmentGroup, CompactionError> {
        let Some((first_block, last_block)) = self.block_range else {
            return Err(CompactionError).attach_printable("segment group builder has no segments");
        };

        let range_start = first_block.number as u32;
        let range_len = (last_block.number - first_block.number + 1) as u32;

        let mut indexes = Vec::new();

        for (fragment_id, fragment_indexes) in self.block_indexes.into_iter() {
            let fragment_indexes = fragment_indexes
                .into_iter()
                .map(|(index_id, index_builder)| {
                    let index = index_builder.build().change_context(CompactionError)?;
                    Ok(fragment::Index {
                        index_id,
                        index:
index.into(),
                    })
                })
                .collect::<Result<Vec<_>, _>>()?;

            indexes.push(IndexFragment {
                fragment_id,
                range_start,
                range_len,
                indexes: fragment_indexes,
            })
        }

        // Deterministic ordering so lookups can binary-search by fragment id.
        indexes.sort_by_key(|index| index.fragment_id);

        let index = IndexGroupFragment { indexes };

        Ok(SegmentGroup { first_block, index })
    }
}

// diff --git a/common/src/compaction/mod.rs (new file, mode 100644)
mod cli;
mod error;
mod group;
mod group_builder;
mod segment;
mod segment_builder;
mod service;

use apibara_etcd::{EtcdClient, LockOptions};
use error_stack::{Result, ResultExt};
use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn};

use crate::file_cache::FileCache;
use crate::{chain_view::ChainView, object_store::ObjectStore, options_store::OptionsStore};

pub use self::cli::CompactionArgs;
pub use self::error::CompactionError;
pub use self::service::{CompactionService, CompactionServiceOptions};

/// Runs the compaction service behind a distributed etcd lock.
///
/// Only one compaction process may run at a time; the loop acquires the lock,
/// validates that the stored segment/group sizes match the configured ones
/// (changing them would corrupt existing data), then runs the service,
/// restarting it after a delay on error.
pub async fn compaction_service_loop(
    etcd_client: EtcdClient,
    object_store: ObjectStore,
    chain_view: tokio::sync::watch::Receiver<Option<ChainView>>,
    file_cache: FileCache,
    options: CompactionServiceOptions,
    ct: CancellationToken,
) -> Result<(), CompactionError> {
    let mut lock_client = etcd_client.lock_client(LockOptions::default());

    while !ct.is_cancelled() {
        info!("acquiring compaction lock");

        let Some(mut lock) = lock_client
            .lock("compaction/lock", ct.clone())
            .await
            .change_context(CompactionError)
            .attach_printable("failed to acquire compaction lock")?
        else {
            warn!("failed to acquire compaction lock");
            break;
        };

        // Load options from etcd and check if they match the current options.
        let mut options_store = OptionsStore::new(&etcd_client);

        if let Some(segment_size) = options_store
            .get_segment_size()
            .await
            .change_context(CompactionError)
            .attach_printable("failed to get segment size options")?
        {
            if segment_size != options.segment_size {
                return Err(CompactionError)
                    .attach_printable("segment size changed")
                    .attach_printable_lazy(|| format!("stored segment size: {}", segment_size))
                    .attach_printable_lazy(|| {
                        format!("new segment size: {}", options.segment_size)
                    });
            }
        } else {
            // First run: persist the configured segment size.
            options_store
                .set_segment_size(options.segment_size)
                .await
                .change_context(CompactionError)
                .attach_printable("failed to set segment size options")?;
        }

        if let Some(group_size) = options_store
            .get_group_size()
            .await
            .change_context(CompactionError)
            .attach_printable("failed to get group size options")?
        {
            if group_size != options.group_size {
                return Err(CompactionError)
                    .attach_printable("group size changed")
                    .attach_printable_lazy(|| format!("stored group size: {}", group_size))
                    .attach_printable_lazy(|| format!("new group size: {}", options.group_size));
            }
        } else {
            options_store
                .set_group_size(options.group_size)
                .await
                .change_context(CompactionError)
                .attach_printable("failed to set group size options")?;
        }

        let compaction_service = CompactionService::new(
            etcd_client.clone(),
            object_store.clone(),
            file_cache.clone(),
            chain_view.clone(),
            options.clone(),
        );

        match compaction_service.start(&mut lock, ct.clone()).await {
            Ok(_) => {
                lock_client
                    .unlock(lock)
                    .await
                    .change_context(CompactionError)?;
                info!("compaction lock released");
                break;
            }
            Err(err) => {
                error!(error = ?err, "compaction service error");
                lock_client
                    .unlock(lock)
                    .await
                    .change_context(CompactionError)?;

                // TODO: configurable with exponential backoff.
                tokio::time::sleep(std::time::Duration::from_secs(10)).await;
            }
        }
    }

    Ok(())
}

// diff --git a/common/src/compaction/segment.rs (new file, mode 100644)
use error_stack::{Result, ResultExt};
use futures::FutureExt;
use tokio_util::sync::CancellationToken;
use tracing::info;

use crate::{
    block_store::{BlockStoreReader, BlockStoreWriter},
    chain_view::{ChainView, NextCursor},
    file_cache::FileCacheError,
    ingestion::IngestionStateClient,
};

use super::{segment_builder::SegmentBuilder, CompactionError};

/// Collects single blocks into fixed-size segments and uploads them.
pub struct SegmentService {
    segment_size: usize,
    chain_view: ChainView,
    block_store_reader: BlockStoreReader,
    block_store_writer: BlockStoreWriter,
    state_client: IngestionStateClient,
}

impl SegmentService {
    pub fn new(
        segment_size: usize,
        chain_view: ChainView,
        block_store_reader: BlockStoreReader,
        block_store_writer: BlockStoreWriter,
        state_client: IngestionStateClient,
    ) -> Self {
        Self {
            segment_size,
            chain_view,
            block_store_reader,
            block_store_writer,
            state_client,
        }
    }

    /// Runs the segmentation loop until `ct` is cancelled.
    pub async fn start(mut self, ct: CancellationToken) -> Result<(), CompactionError> {
        let mut builder = SegmentBuilder::default();
        let chain_view = self.chain_view;

        loop {
            if ct.is_cancelled() {
                return Ok(());
            }

            // First block of the next segment: one past the segmented cursor,
            // or the chain's starting cursor on the very first run.
            let first_block_in_segment = if let Some(cursor) = chain_view
                .get_segmented_cursor()
                .await
                .change_context(CompactionError)?
            {
                let NextCursor::Continue(cursor) = chain_view
                    .get_next_cursor(&Some(cursor.clone()))
                    .await
                    .change_context(CompactionError)?
                else {
                    return Err(CompactionError)
                        .attach_printable("chain view returned invalid cursor")
                        .attach_printable_lazy(|| format!("cursor: {cursor}"));
                };
                cursor
            } else {
                chain_view
                    .get_starting_cursor()
                    .await
                    .change_context(CompactionError)?
};

            let head = chain_view
                .get_head()
                .await
                .change_context(CompactionError)?;
            let finalized = chain_view
                .get_finalized_cursor()
                .await
                .change_context(CompactionError)?;

            info!(
                next_cursor = %first_block_in_segment,
                head = %head,
                finalized = %finalized,
                "compaction: segment tick"
            );

            // Segments only contain blocks that are both finalized and at-or-below head.
            let latest_available = u64::min(finalized.number, head.number);

            if first_block_in_segment.number + self.segment_size as u64 <= latest_available {
                info!(
                    starting_cursor = %first_block_in_segment,
                    "creating new segment"
                );

                let mut current = first_block_in_segment.clone();
                let mut last_block_in_segment = first_block_in_segment.clone();

                builder
                    .start_new_segment(current.clone())
                    .change_context(CompactionError)?;

                // Walk the canonical chain, adding exactly segment_size blocks.
                for _ in 0..self.segment_size {
                    let entry = self
                        .block_store_reader
                        .get_block(&current)
                        .await
                        .map_err(FileCacheError::Foyer)
                        .change_context(CompactionError)
                        .attach_printable("failed to get block")
                        .attach_printable_lazy(|| format!("cursor: {current}"))?;

                    builder
                        .add_block(&current, entry.value())
                        .change_context(CompactionError)
                        .attach_printable("failed to add block to segment")
                        .attach_printable_lazy(|| format!("cursor: {current}"))?;

                    let NextCursor::Continue(next_cursor) = chain_view
                        .get_next_cursor(&Some(current.clone()))
                        .await
                        .change_context(CompactionError)?
                    else {
                        return Err(CompactionError)
                            .attach_printable("chain view returned invalid next cursor")
                            .attach_printable_lazy(|| format!("cursor: {current}"));
                    };

                    last_block_in_segment = current.clone();
                    current = next_cursor;
                }

                let segment_data = builder.segment_data().change_context(CompactionError)?;

                info!(
                    first_block = %first_block_in_segment,
                    last_block = %last_block_in_segment,
                    "uploading segment to object store"
                );

                for segment in segment_data {
                    self.block_store_writer
                        .put_segment(&first_block_in_segment, segment)
                        .await
                        .change_context(CompactionError)
                        .attach_printable("failed to put segment")?;
                }

                self.state_client
                    .put_segmented(last_block_in_segment.number)
                    .await
                    .change_context(CompactionError)
                    .attach_printable("failed to put segmented block")?;
            } else {
                // Not enough blocks yet: wait for whichever cursor is holding us back.
                let state_change = if finalized.number < head.number {
                    info!("compaction waiting for finalized change");
                    chain_view.finalized_changed().boxed()
                } else {
                    info!("compaction waiting for head change");
                    chain_view.head_changed().boxed()
                };

                tokio::pin!(state_change);

                let Some(_) = ct.run_until_cancelled(state_change).await else {
                    return Ok(());
                };
            }
        }
    }
}

// diff --git a/common/src/compaction/segment_builder.rs (new file, mode 100644)
use std::collections::HashMap;

use bytes::Bytes;
use error_stack::{Result, ResultExt};

use crate::{
    fragment::{
        Block, BodyFragment, HeaderFragment, IndexGroupFragment, JoinGroupFragment,
        HEADER_FRAGMENT_NAME, INDEX_FRAGMENT_NAME, JOIN_FRAGMENT_NAME,
    },
    segment::{FragmentData, Segment, SerializedSegment},
    Cursor,
};

use super::CompactionError;

/// Accumulates blocks and splits them into one segment per fragment kind.
///
/// NOTE(review): the generic parameters below were garbled in the source and are
/// reconstructed from how the fields are used in `add_block`/`segment_data` —
/// TODO confirm against the original file.
#[derive(Debug, Default)]
pub struct SegmentBuilder {
    // Number of body fragments every block must carry; fixed by the first block.
    expected_fragment_count: Option<usize>,
    // Cursor of the first block of the segment currently being built.
    first_block: Option<Cursor>,
    headers: Vec<FragmentData<HeaderFragment>>,
    indexes: Vec<FragmentData<IndexGroupFragment>>,
    joins: Vec<FragmentData<JoinGroupFragment>>,
    // Body fragments keyed by fragment id, paired with the fragment's name.
    body: HashMap<crate::fragment::FragmentId, (String, Vec<FragmentData<BodyFragment>>)>,
}

impl SegmentBuilder {
    /// Begins a new segment starting at `first_block`.
    ///
    /// Errors if a previous segment was started but never finished.
    pub fn start_new_segment(&mut self, first_block: Cursor) -> Result<(), CompactionError> {
        if self.first_block.is_some() {
            return Err(CompactionError)
                .attach_printable("segment builder already started a segment");
        }

        self.first_block = Some(first_block);

        Ok(())
    }

    /// Deserializes `bytes` as a block and appends its fragments to the
    /// in-progress segment.
    pub fn add_block(&mut self, cursor: &Cursor, bytes: &Bytes) -> Result<(), CompactionError> {
        // NOTE(review): the turbofish was garbled in the source; reconstructed as
        // `<Block, rkyv::rancor::Error>` — TODO confirm.
        let block = rkyv::from_bytes::<Block, rkyv::rancor::Error>(bytes)
            .change_context(CompactionError)
            .attach_printable("failed to access block")?;

        // Every block in a segment must expose the same set of body fragments.
        match self.expected_fragment_count {
            Some(expected_count) if block.body.len() != expected_count => {
                return Err(CompactionError)
                    .attach_printable("block does not have the expected number of fragments")
                    .attach_printable("this is a bug in the network-specific ingestion code")
                    .attach_printable_lazy(|| format!("expected: {}", expected_count))
                    .attach_printable_lazy(|| format!("actual: {}", block.body.len()));
            }
            Some(_) => {}
            None => {
                self.expected_fragment_count = Some(block.body.len());
            }
        }

        self.indexes.push(FragmentData {
            cursor: cursor.clone(),
            data: block.index,
        });

        self.joins.push(FragmentData {
            cursor: cursor.clone(),
            data: block.join,
        });

        self.headers.push(FragmentData {
            cursor: cursor.clone(),
            data: block.header,
        });

        for fragment in block.body.into_iter() {
            let fragment_id = fragment.fragment_id;
            let name = fragment.name.to_string();

            let data = FragmentData {
                cursor: cursor.clone(),
                data: fragment,
            };

            match self.body.get_mut(&fragment_id) {
                Some(existing) => existing.1.push(data),
                None => {
                    self.body.insert(fragment_id, (name, vec![data]));
                }
            }
        }

        Ok(())
    }

    /// Finishes the in-progress segment, serializing one `SerializedSegment`
    /// per fragment kind (index, join, header, and one per body fragment).
    pub fn segment_data(&mut self) -> Result<Vec<SerializedSegment>, CompactionError> {
        let Some(first_block) = self.first_block.take() else {
            return Err(CompactionError).attach_printable("no segment started");
        };

        let indexes = std::mem::take(&mut self.indexes);
        let joins = std::mem::take(&mut self.joins);
        let headers = std::mem::take(&mut self.headers);
        let segments = std::mem::take(&mut self.body);
        let expected_fragment_count = headers.len();

        let mut serialized = Vec::with_capacity(segments.len());

        if indexes.len() != headers.len() {
            return Err(CompactionError)
                .attach_printable("index, header, and body fragments do not match")
                .attach_printable_lazy(|| format!("indexes len: {}", indexes.len()))
                .attach_printable_lazy(|| format!("headers len: {}", headers.len()));
        }

        if joins.len() != headers.len() {
            return Err(CompactionError)
                .attach_printable("join, header, and body fragments do not match")
                .attach_printable_lazy(|| format!("joins len: {}", joins.len()))
                .attach_printable_lazy(|| format!("headers len: {}", headers.len()));
        }

        // NOTE(review): the rkyv turbofishes below were garbled in the source;
        // reconstructed as `to_bytes::<rkyv::rancor::Error>` — TODO confirm.
        {
            let segment = Segment {
                first_block: first_block.clone(),
                data: indexes,
            };

            let data = rkyv::to_bytes::<rkyv::rancor::Error>(&segment)
                .change_context(CompactionError)
                .attach_printable("failed to serialize index segment")?;
            let data = Bytes::copy_from_slice(data.as_slice());

            serialized.push(SerializedSegment {
                name: INDEX_FRAGMENT_NAME.to_string(),
                data,
            });
        }

        {
            let segment = Segment {
                first_block: first_block.clone(),
                data: joins,
            };

            let data = rkyv::to_bytes::<rkyv::rancor::Error>(&segment)
                .change_context(CompactionError)
                .attach_printable("failed to serialize join segment")?;
            let data = Bytes::copy_from_slice(data.as_slice());

            serialized.push(SerializedSegment {
                name: JOIN_FRAGMENT_NAME.to_string(),
                data,
            });
        }

        {
            let segment = Segment {
                first_block: first_block.clone(),
                data: headers,
            };

            let data = rkyv::to_bytes::<rkyv::rancor::Error>(&segment)
                .change_context(CompactionError)
                .attach_printable("failed to serialize header segment")?;
            let data = Bytes::copy_from_slice(data.as_slice());

            serialized.push(SerializedSegment {
                name: HEADER_FRAGMENT_NAME.to_string(),
                data,
            });
        }

        for (name, data) in segments.into_values() {
            if data.len() != expected_fragment_count {
                return Err(CompactionError)
                    .attach_printable("body fragments do not match")
                    .attach_printable_lazy(|| format!("expected: {}", expected_fragment_count))
                    .attach_printable_lazy(|| format!("actual: {}", data.len()));
            }

            let segment = Segment {
                first_block: first_block.clone(),
                data,
            };

            let data = rkyv::to_bytes::<rkyv::rancor::Error>(&segment)
                .change_context(CompactionError)
                .attach_printable("failed to serialize segment")?;
            let data = Bytes::copy_from_slice(data.as_slice());

            serialized.push(SerializedSegment { name, data });
        }

        // NOTE: we leave the expected_fragment_count field as is because we want the data to be consistent
        // across all segments.

        Ok(serialized)
    }
}

// diff --git a/common/src/compaction/service.rs (new file, mode 100644)
use apibara_etcd::{EtcdClient, Lock};
use error_stack::{Result, ResultExt};
use tokio_util::sync::CancellationToken;
use tracing::{debug, info};

use crate::{
    block_store::{BlockStoreReader, BlockStoreWriter},
    chain_view::ChainView,
    compaction::group::SegmentGroupService,
    file_cache::FileCache,
    ingestion::IngestionStateClient,
    object_store::ObjectStore,
};

use super::{error::CompactionError, segment::SegmentService};

#[derive(Debug, Clone)]
pub struct CompactionServiceOptions {
    /// How many blocks in a single segment.
    pub segment_size: usize,
    /// How many segments in a single segment group.
pub group_size: usize,
}

/// Drives the segment and group compaction loops under a single etcd lock.
pub struct CompactionService {
    options: CompactionServiceOptions,
    block_store_reader: BlockStoreReader,
    block_store_writer: BlockStoreWriter,
    state_client: IngestionStateClient,
    chain_view: tokio::sync::watch::Receiver<Option<ChainView>>,
}

impl CompactionService {
    pub fn new(
        etcd_client: EtcdClient,
        object_store: ObjectStore,
        file_cache: FileCache,
        chain_view: tokio::sync::watch::Receiver<Option<ChainView>>,
        options: CompactionServiceOptions,
    ) -> Self {
        let block_store_reader = BlockStoreReader::new(object_store.clone(), file_cache);
        let block_store_writer = BlockStoreWriter::new(object_store);
        let state_client = IngestionStateClient::new(&etcd_client);

        Self {
            options,
            block_store_reader,
            block_store_writer,
            chain_view,
            state_client,
        }
    }

    /// Spawns the segment and group services and runs until cancellation, a
    /// lock-keep-alive failure, or either service terminating.
    pub async fn start(
        self,
        lock: &mut Lock,
        ct: CancellationToken,
    ) -> Result<(), CompactionError> {
        // Wait for the chain view to become available, keeping the lock alive.
        let chain_view = loop {
            lock.keep_alive().await.change_context(CompactionError)?;

            if let Some(chain_view) = self.chain_view.borrow().clone() {
                break chain_view;
            };

            let Some(_) = ct
                .run_until_cancelled(tokio::time::sleep(std::time::Duration::from_secs(5)))
                .await
            else {
                return Ok(());
            };
        };

        let segment_service = SegmentService::new(
            self.options.segment_size,
            chain_view.clone(),
            self.block_store_reader.clone(),
            self.block_store_writer.clone(),
            self.state_client.clone(),
        );

        let group_service = SegmentGroupService::new(
            self.options.segment_size,
            self.options.group_size,
            chain_view.clone(),
            self.block_store_reader.clone(),
            self.block_store_writer.clone(),
            self.state_client.clone(),
        );

        // NOTE(review): when one select! branch completes, the other spawned
        // task keeps running until `ct` is cancelled by the caller — confirm
        // the caller always cancels on exit.
        let segment_service_handle = tokio::spawn(segment_service.start(ct.clone()));
        let group_service_handle = tokio::spawn(group_service.start(ct.clone()));

        let lock_handle = lock_keep_alive_loop(lock, ct.clone());

        tokio::select! {
            _ = ct.cancelled() => {
                Ok(())
            }
            lock = lock_handle => {
                info!("compaction lock loop terminated");
                lock.change_context(CompactionError)
            }
            segment_service = segment_service_handle => {
                info!("compaction segmentation loop terminated");
                // Outer `?` handles the JoinError, inner result is the service's.
                segment_service.change_context(CompactionError)?.change_context(CompactionError)
            }
            group_service = group_service_handle => {
                info!("compaction group service loop terminated");
                group_service.change_context(CompactionError)?.change_context(CompactionError)
            }
        }
    }
}

/// Refreshes the etcd lock every 5 seconds until cancelled.
async fn lock_keep_alive_loop(
    lock: &mut Lock,
    ct: CancellationToken,
) -> Result<(), CompactionError> {
    let mut interval = tokio::time::interval(std::time::Duration::from_secs(5));

    loop {
        tokio::select! {
            _ = ct.cancelled() => {
                return Ok(());
            }
            _ = interval.tick() => {
                debug!("compaction: keep alive");
                lock.keep_alive().await.change_context(CompactionError)?;
            }
        }
    }
}

impl Default for CompactionServiceOptions {
    fn default() -> Self {
        Self {
            segment_size: 1_000,
            group_size: 100,
        }
    }
}

// diff --git a/common/src/core.rs (new file, mode 100644)
use apibara_dna_protocol::dna;
use rkyv::{Archive, Deserialize, Serialize};

/// Arbitrary length hash.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Archive, Serialize, Deserialize, Default)]
pub struct Hash(pub Vec<u8>);

/// Cursor uniquely identifies a block by its number and hash.
#[derive(Clone, PartialEq, Eq, Hash, Archive, Serialize, Deserialize)]
pub struct Cursor {
    pub number: u64,
    pub hash: Hash,
}

pub trait GetCursor {
    /// Returns the current cursor.
fn cursor(&self) -> Option<Cursor>;
}

impl Cursor {
    /// Builds a cursor for a finalized block, where the hash is irrelevant.
    pub fn new_finalized(number: u64) -> Self {
        Self {
            number,
            hash: Default::default(),
        }
    }

    /// True when `self` comes strictly before `other` (by number only).
    pub fn strict_before(&self, other: &Self) -> bool {
        self.number < other.number
    }

    /// True when `self` comes strictly after `other` (by number only).
    pub fn strict_after(&self, other: &Self) -> bool {
        self.number > other.number
    }

    pub fn new(number: u64, hash: Hash) -> Self {
        Self { number, hash }
    }

    /// Hex representation of the hash, e.g. `0xdeadbeef`.
    pub fn hash_as_hex(&self) -> String {
        format!("{}", self.hash)
    }
}

impl Hash {
    pub fn as_slice(&self) -> &[u8] {
        &self.0
    }

    /// True when every byte is zero (also true for the empty hash? no:
    /// `all` on an empty iterator returns true, so the empty hash counts as zero).
    pub fn is_zero(&self) -> bool {
        self.0.iter().all(|b| *b == 0)
    }
}

impl std::fmt::Debug for Cursor {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Cursor(n={} h={})", self.number, self.hash_as_hex())
    }
}

impl std::fmt::Display for Cursor {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}/{}", self.number, self.hash_as_hex())
    }
}

impl std::fmt::Debug for Hash {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Hash({})", self)
    }
}

impl std::fmt::Display for Hash {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if self.0.is_empty() {
            return write!(f, "0x0");
        }
        write!(f, "0x{}", hex::encode(&self.0))
    }
}

impl std::fmt::Display for ArchivedHash {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if self.0.is_empty() {
            return write!(f, "0x0");
        }
        write!(f, "0x{}", hex::encode(&self.0))
    }
}

impl From<Cursor> for dna::stream::Cursor {
    fn from(value: Cursor) -> Self {
        Self {
            order_key: value.number,
            unique_key: value.hash.0,
        }
    }
}

impl From<dna::stream::Cursor> for Cursor {
    fn from(value: dna::stream::Cursor) -> Self {
        Self {
            number: value.order_key,
            hash: Hash(value.unique_key),
        }
    }
}

pub mod testing {
    /// Returns a new test cursor where the hash depends on the cursor number and chain.
    pub fn new_test_cursor(number: u64, chain: u8) -> super::Cursor {
        let formatted = format!("{number:016x}{chain:02x}");
        let hash = hex::decode(formatted).expect("valid hash");
        super::Cursor {
            number,
            hash: super::Hash(hash),
        }
    }
}

// diff --git a/common/src/data_stream/filter.rs (new file, mode 100644)
use std::collections::{BTreeMap, HashSet};

use roaring::RoaringBitmap;

use crate::query::{BlockFilter, FilterId};

pub trait BlockFilterFactory {
    // NOTE(review): the generic parameters were garbled in the source;
    // reconstructed as `&[Vec<u8>]` and `tonic::Result<Vec<BlockFilter>, _>` —
    // TODO confirm.
    fn create_block_filter(
        &self,
        filters: &[Vec<u8>],
    ) -> tonic::Result<Vec<BlockFilter>, tonic::Status>;
}

/// Maps a matched row index to the set of filter ids that matched it.
#[derive(Debug, Default)]
pub struct FilterMatch(BTreeMap<u32, HashSet<FilterId>>);

#[derive(Debug)]
pub struct Match {
    pub index: u32,
    pub filter_ids: Vec<FilterId>,
}

impl FilterMatch {
    pub fn clear(&mut self) {
        self.0.clear();
    }

    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Records `filter_id` as matching every index in `bitmap`.
    pub fn add_match(&mut self, filter_id: FilterId, bitmap: &RoaringBitmap) {
        for index in bitmap.iter() {
            self.0.entry(index).or_default().insert(filter_id);
        }
    }

    pub fn add_single_match(&mut self, filter_id: FilterId, index: u32) {
        self.0.entry(index).or_default().insert(filter_id);
    }

    /// Iterates matches in ascending index order.
    pub fn iter(&self) -> impl Iterator<Item = Match> + '_ {
        self.0.iter().map(|(index, filter_ids)| Match {
            index: *index,
            filter_ids: filter_ids.iter().copied().collect(),
        })
    }
}

// diff --git a/common/src/data_stream/fragment_access.rs (new file, mode 100644)
use error_stack::{Result, ResultExt};

use crate::{
    block_store::BlockStoreReader,
    core::Cursor,
    file_cache::{CachedFile, FileCacheError},
    fragment::{
        Block, BodyFragment, FragmentId, HeaderFragment, IndexFragment, IndexGroupFragment,
        JoinFragment, JoinGroupFragment, HEADER_FRAGMENT_ID,
HEADER_FRAGMENT_NAME,
        INDEX_FRAGMENT_NAME, JOIN_FRAGMENT_NAME,
    },
    segment::Segment,
};

#[derive(Debug)]
pub struct FragmentAccessError;

/// Uniform access to a block's fragments, whether the block is stored as a
/// single object or inside a segment.
///
/// NOTE(review): the generic parameters throughout this file were garbled in
/// the source and have been reconstructed from usage (`Access<T>` with
/// `PhantomData<T>`, `Segment<T>` per fragment kind) — TODO confirm.
pub struct FragmentAccess {
    inner: InnerAccess,
}

enum InnerAccess {
    /// The block is stored as a standalone object.
    Block {
        store: BlockStoreReader,
        block_cursor: Cursor,
    },
    /// The block lives at `offset` inside the segment starting at `segment_cursor`.
    Segment {
        store: BlockStoreReader,
        segment_cursor: Cursor,
        offset: usize,
    },
}

impl FragmentAccess {
    pub fn new_in_block(store: BlockStoreReader, block_cursor: Cursor) -> Self {
        FragmentAccess {
            inner: InnerAccess::Block {
                store,
                block_cursor,
            },
        }
    }

    pub fn new_in_segment(store: BlockStoreReader, segment_cursor: Cursor, offset: usize) -> Self {
        FragmentAccess {
            inner: InnerAccess::Segment {
                store,
                segment_cursor,
                offset,
            },
        }
    }

    pub async fn get_fragment_indexes(
        &self,
        fragment_id: FragmentId,
    ) -> Result<Access<IndexFragment>, FragmentAccessError> {
        self.inner.get_fragment_indexes(fragment_id).await
    }

    pub async fn get_fragment_joins(
        &self,
        fragment_id: FragmentId,
    ) -> Result<Access<JoinFragment>, FragmentAccessError> {
        self.inner.get_fragment_joins(fragment_id).await
    }

    pub async fn get_header_fragment(
        &self,
    ) -> Result<Access<HeaderFragment>, FragmentAccessError> {
        self.inner.get_header_fragment().await
    }

    pub async fn get_body_fragment(
        &self,
        fragment_id: FragmentId,
        fragment_name: String,
    ) -> Result<Access<BodyFragment>, FragmentAccessError> {
        self.inner
            .get_body_fragment(fragment_id, fragment_name)
            .await
    }
}

impl InnerAccess {
    async fn get_fragment_indexes(
        &self,
        fragment_id: FragmentId,
    ) -> Result<Access<IndexFragment>, FragmentAccessError> {
        match self {
            InnerAccess::Block {
                store,
                block_cursor,
            } => {
                let inner = store
                    .get_block(block_cursor)
                    .await
                    .map_err(FileCacheError::Foyer)
                    .change_context(FragmentAccessError)?;

                Ok(Access::Block {
                    inner,
                    fragment_id,
                    _phantom: Default::default(),
                })
            }
            InnerAccess::Segment {
                store,
                segment_cursor,
                offset,
            } => {
                let inner = store
                    .get_segment(segment_cursor, INDEX_FRAGMENT_NAME)
                    .await
                    .map_err(FileCacheError::Foyer)
                    .change_context(FragmentAccessError)?;

                Ok(Access::Segment {
                    inner,
                    fragment_id,
                    offset: *offset,
                    _phantom: Default::default(),
                })
            }
        }
    }

    async fn get_fragment_joins(
        &self,
        fragment_id: FragmentId,
    ) -> Result<Access<JoinFragment>, FragmentAccessError> {
        match self {
            InnerAccess::Block {
                store,
                block_cursor,
            } => {
                let inner = store
                    .get_block(block_cursor)
                    .await
                    .map_err(FileCacheError::Foyer)
                    .change_context(FragmentAccessError)?;

                Ok(Access::Block {
                    inner,
                    fragment_id,
                    _phantom: Default::default(),
                })
            }
            InnerAccess::Segment {
                store,
                segment_cursor,
                offset,
            } => {
                let inner = store
                    .get_segment(segment_cursor, JOIN_FRAGMENT_NAME)
                    .await
                    .map_err(FileCacheError::Foyer)
                    .change_context(FragmentAccessError)?;

                Ok(Access::Segment {
                    inner,
                    fragment_id,
                    offset: *offset,
                    _phantom: Default::default(),
                })
            }
        }
    }

    async fn get_body_fragment(
        &self,
        fragment_id: FragmentId,
        fragment_name: String,
    ) -> Result<Access<BodyFragment>, FragmentAccessError> {
        match self {
            InnerAccess::Block {
                store,
                block_cursor,
            } => {
                let inner = store
                    .get_block(block_cursor)
                    .await
                    .map_err(FileCacheError::Foyer)
                    .change_context(FragmentAccessError)?;

                Ok(Access::Block {
                    inner,
                    fragment_id,
                    _phantom: Default::default(),
                })
            }
            InnerAccess::Segment {
                store,
                segment_cursor,
                offset,
            } => {
                let inner = store
                    .get_segment(segment_cursor, fragment_name)
                    .await
                    .map_err(FileCacheError::Foyer)
                    .change_context(FragmentAccessError)?;

                // FIX(review): the source stored `HEADER_FRAGMENT_ID` here,
                // inconsistently with the other accessors. The body-segment
                // access path ignores fragment_id (each body fragment is its
                // own segment file), so storing the real id is behaviorally
                // equivalent and less misleading.
                Ok(Access::Segment {
                    inner,
                    fragment_id,
                    offset: *offset,
                    _phantom: Default::default(),
                })
            }
        }
    }

    async fn get_header_fragment(
        &self,
    ) -> Result<Access<HeaderFragment>, FragmentAccessError> {
        match self {
            InnerAccess::Block {
                store,
                block_cursor,
            } => {
                let inner = store
                    .get_block(block_cursor)
                    .await
                    .map_err(FileCacheError::Foyer)
                    .change_context(FragmentAccessError)?;

                Ok(Access::Block {
                    inner,
                    fragment_id: HEADER_FRAGMENT_ID,
                    _phantom: Default::default(),
                })
            }
            InnerAccess::Segment {
                store,
                segment_cursor,
                offset,
            } => {
                let inner = store
                    .get_segment(segment_cursor, HEADER_FRAGMENT_NAME)
                    .await
                    .map_err(FileCacheError::Foyer)
                    .change_context(FragmentAccessError)?;

                Ok(Access::Segment {
                    inner,
                    fragment_id: HEADER_FRAGMENT_ID,
                    offset: *offset,
                    _phantom: Default::default(),
                })
            }
        }
    }
}

/// Typed handle to one fragment's archived bytes.
pub enum Access<T> {
    Block {
        inner: CachedFile,
        fragment_id: FragmentId,
        _phantom: std::marker::PhantomData<T>,
    },
    Segment {
        inner: CachedFile,
        offset: usize,
        fragment_id: FragmentId,
        _phantom: std::marker::PhantomData<T>,
    },
}

impl Access<IndexFragment> {
    pub fn access(&self) -> Result<&rkyv::Archived<IndexFragment>, FragmentAccessError> {
        match self {
            Access::Block {
                inner, fragment_id, ..
            } => {
                // SAFETY(review): `access_unchecked` skips validation; assumes
                // the stored bytes were produced by this crate's serializer.
                let block =
                    unsafe { rkyv::access_unchecked::<rkyv::Archived<Block>>(inner.value()) };

                let Some(pos) = block
                    .index
                    .indexes
                    .iter()
                    .position(|f| f.fragment_id == *fragment_id)
                else {
                    return Err(FragmentAccessError)
                        .attach_printable("index for fragment not found")
                        .attach_printable_lazy(|| format!("fragment id: {}", fragment_id));
                };

                Ok(&block.index.indexes[pos])
            }
            Access::Segment {
                inner,
                offset,
                fragment_id,
                ..
            } => {
                let segment = unsafe {
                    rkyv::access_unchecked::<rkyv::Archived<Segment<IndexGroupFragment>>>(
                        inner.value(),
                    )
                };

                let block_index = &segment.data[*offset];

                let Some(pos) = block_index
                    .data
                    .indexes
                    .iter()
                    .position(|f| f.fragment_id == *fragment_id)
                else {
                    return Err(FragmentAccessError)
                        .attach_printable("index for fragment not found")
                        .attach_printable_lazy(|| format!("fragment id: {}", fragment_id));
                };

                Ok(&block_index.data.indexes[pos])
            }
        }
    }
}

impl Access<JoinFragment> {
    pub fn access(&self) -> Result<&rkyv::Archived<JoinFragment>, FragmentAccessError> {
        match self {
            Access::Block {
                inner, fragment_id, ..
            } => {
                let block =
                    unsafe { rkyv::access_unchecked::<rkyv::Archived<Block>>(inner.value()) };

                let Some(pos) = block
                    .join
                    .joins
                    .iter()
                    .position(|f| f.fragment_id == *fragment_id)
                else {
                    return Err(FragmentAccessError)
                        .attach_printable("joins for fragment not found")
                        .attach_printable_lazy(|| format!("fragment id: {}", fragment_id));
                };

                Ok(&block.join.joins[pos])
            }
            Access::Segment {
                inner,
                offset,
                fragment_id,
                ..
            } => {
                let segment = unsafe {
                    rkyv::access_unchecked::<rkyv::Archived<Segment<JoinGroupFragment>>>(
                        inner.value(),
                    )
                };

                let block_index = &segment.data[*offset];

                let Some(pos) = block_index
                    .data
                    .joins
                    .iter()
                    .position(|f| f.fragment_id == *fragment_id)
                else {
                    return Err(FragmentAccessError)
                        .attach_printable("joins for fragment not found")
                        .attach_printable_lazy(|| format!("fragment id: {}", fragment_id));
                };

                Ok(&block_index.data.joins[pos])
            }
        }
    }
}

impl Access<HeaderFragment> {
    pub fn access(&self) -> Result<&rkyv::Archived<HeaderFragment>, FragmentAccessError> {
        match self {
            Access::Block { inner, .. } => {
                let block =
                    unsafe { rkyv::access_unchecked::<rkyv::Archived<Block>>(inner.value()) };
                Ok(&block.header)
            }
            Access::Segment { inner, offset, .. } => {
                // FIX(review): the source passed `inner` (not `inner.value()`)
                // here; normalized to match every sibling call site — confirm
                // against the CachedFile API.
                let segment = unsafe {
                    rkyv::access_unchecked::<rkyv::Archived<Segment<HeaderFragment>>>(
                        inner.value(),
                    )
                };

                Ok(&segment.data[*offset].data)
            }
        }
    }
}

impl Access<BodyFragment> {
    pub fn access(&self) -> Result<&rkyv::Archived<BodyFragment>, FragmentAccessError> {
        match self {
            Access::Block {
                inner, fragment_id, ..
            } => {
                let block =
                    unsafe { rkyv::access_unchecked::<rkyv::Archived<Block>>(inner.value()) };

                let Some(pos) = block
                    .body
                    .iter()
                    .position(|f| f.fragment_id == *fragment_id)
                else {
                    return Err(FragmentAccessError)
                        .attach_printable("body for fragment not found")
                        .attach_printable_lazy(|| format!("fragment id: {}", fragment_id));
                };

                Ok(&block.body[pos])
            }
            Access::Segment { inner, offset, .. } => {
                // FIX(review): `inner` normalized to `inner.value()` as above.
                let segment = unsafe {
                    rkyv::access_unchecked::<rkyv::Archived<Segment<BodyFragment>>>(
                        inner.value(),
                    )
                };

                Ok(&segment.data[*offset].data)
            }
        }
    }
}

impl error_stack::Context for FragmentAccessError {}

impl std::fmt::Display for FragmentAccessError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "fragment access error")
    }
}

// diff --git a/common/src/data_stream/mod.rs (new file, mode 100644)
mod filter;
mod fragment_access;
mod stream;

pub use self::filter::{BlockFilterFactory, FilterMatch};
pub use self::fragment_access::FragmentAccess;
pub use self::stream::{DataStream, DataStreamError};

// diff --git a/common/src/data_stream/scanner.rs (new file, mode 100644)
// NOTE(review): this module is not declared in data_stream/mod.rs and its
// BlockFilterFactory duplicates the one in filter.rs — dead code; consider
// deleting the file instead of keeping the commented-out scaffolding below.
use std::ops::RangeInclusive;

use error_stack::Result;
use futures::Future;
use roaring::RoaringBitmap;

use crate::query::BlockFilter;

use super::fragment_access::FragmentAccess;

pub trait BlockFilterFactory {
    fn create_block_filter(
        &self,
        filters: &[Vec<u8>],
    ) -> tonic::Result<Vec<BlockFilter>, tonic::Status>;
}

/*
#[derive(Debug)]
pub struct ScannerError;

/// Action to take.
#[derive(Debug, PartialEq)]
pub enum ScannerAction {
    /// Continue scanning.
    Continue,
    /// Stop scanning.
    Stop,
}

/// Send the specified data to the client.
pub struct SendData {
    pub cursor: Option<Cursor>,
    pub end_cursor: Cursor,
    pub data: Vec<Vec<u8>>,
}

/// Information about a block in a segment.
#[derive(Debug, Clone)]
pub struct SegmentBlock {
    /// The block's cursor.
    pub cursor: Option<Cursor>,
    /// The block's end cursor.
    pub end_cursor: Cursor,
    /// Offset of the block in the segment.
+ pub offset: usize, +} + +pub trait ScannerFactory { + type Scanner: Scanner; + + fn create_scanner(&self, filters: &[Vec]) -> tonic::Result; +} + +pub trait Scanner: Send { + /// Fills the given bitmap with the blocks that match the filters. + fn fill_block_bitmap( + &mut self, + group: &ArchivedSegmentGroup, + bitmap: &mut RoaringBitmap, + block_range: RangeInclusive, + ) -> impl Future> + Send; + + /// Scans a single block. + fn scan_single( + &mut self, + cursor: &Cursor, + fragment_access: &FragmentAccess, + cb: S, + ) -> impl Future> + Send + where + S: FnOnce(Vec>) + Send; +} + +impl error_stack::Context for ScannerError {} + +impl std::fmt::Display for ScannerError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "scanner error") + } +} + +*/ diff --git a/common/src/data_stream/stream.rs b/common/src/data_stream/stream.rs new file mode 100644 index 00000000..5de2bbd1 --- /dev/null +++ b/common/src/data_stream/stream.rs @@ -0,0 +1,697 @@ +use std::{ + collections::{BTreeMap, HashMap}, + time::Duration, +}; + +use apibara_dna_protocol::dna::stream::{ + stream_data_response::Message, Data, DataFinality, Finalize, Heartbeat, Invalidate, + StreamDataResponse, +}; +use bytes::{BufMut, Bytes, BytesMut}; +use error_stack::{Result, ResultExt}; +use roaring::RoaringBitmap; +use tokio::sync::mpsc; +use tokio_util::sync::CancellationToken; +use tracing::{debug, warn}; + +use crate::{ + block_store::BlockStoreReader, + chain_view::{CanonicalCursor, ChainView, NextCursor}, + data_stream::{FilterMatch, FragmentAccess}, + file_cache::FileCacheError, + fragment::{FragmentId, HEADER_FRAGMENT_ID}, + join::ArchivedJoinTo, + query::BlockFilter, + segment::SegmentGroup, + Cursor, +}; + +#[derive(Debug)] +pub struct DataStreamError; + +pub struct DataStream { + block_filter: Vec, + current: Option, + finalized: Cursor, + _finality: DataFinality, + chain_view: ChainView, + store: BlockStoreReader, + fragment_id_to_name: HashMap, + 
heartbeat_interval: tokio::time::Interval, + _permit: tokio::sync::OwnedSemaphorePermit, +} + +type DataStreamMessage = tonic::Result; + +const DEFAULT_BLOCKS_BUFFER_SIZE: usize = 1024 * 1024; + +/// Information about a block in a segment. +#[derive(Debug, Clone)] +struct SegmentBlock { + /// The block's cursor. + pub cursor: Option, + /// The block's end cursor. + pub end_cursor: Cursor, + /// Offset of the block in the segment. + pub offset: usize, +} + +impl DataStream { + #[allow(clippy::too_many_arguments)] + pub fn new( + block_filter: Vec, + starting: Option, + finalized: Cursor, + finality: DataFinality, + heartbeat_interval: Duration, + chain_view: ChainView, + fragment_id_to_name: HashMap, + store: BlockStoreReader, + permit: tokio::sync::OwnedSemaphorePermit, + ) -> Self { + let heartbeat_interval = tokio::time::interval(heartbeat_interval); + Self { + block_filter, + current: starting, + finalized, + _finality: finality, + heartbeat_interval, + chain_view, + fragment_id_to_name, + store, + _permit: permit, + } + } + + pub async fn start( + mut self, + tx: mpsc::Sender, + ct: CancellationToken, + ) -> Result<(), DataStreamError> { + while !ct.is_cancelled() && !tx.is_closed() { + if let Err(err) = self.tick(&tx, &ct).await { + warn!(error = ?err, "data stream error"); + tx.send(Err(tonic::Status::internal("internal server error"))) + .await + .change_context(DataStreamError)?; + return Err(err).change_context(DataStreamError); + } + } + + Ok(()) + } + + async fn tick( + &mut self, + tx: &mpsc::Sender, + ct: &CancellationToken, + ) -> Result<(), DataStreamError> { + let next_cursor = match self + .chain_view + .get_next_cursor(&self.current) + .await + .change_context(DataStreamError)? + { + NextCursor::Continue(cursor) => cursor, + NextCursor::Invalidate(cursor) => { + debug!(cursor = %cursor, "invalidating data"); + + // TODO: collect removed blocks. 
+ let invalidate = Message::Invalidate(Invalidate { + ..Default::default() + }); + + let Some(Ok(permit)) = ct.run_until_cancelled(tx.reserve()).await else { + return Ok(()); + }; + + permit.send(Ok(StreamDataResponse { + message: Some(invalidate), + })); + + self.heartbeat_interval.reset(); + self.current = Some(cursor); + + return Ok(()); + } + NextCursor::AtHead => { + debug!("head reached. waiting for new head"); + tokio::select! { + _ = ct.cancelled() => return Ok(()), + _ = self.heartbeat_interval.tick() => { + debug!("heartbeat"); + return self.send_heartbeat_message(tx, ct).await; + } + _ = self.chain_view.head_changed() => { + debug!("head changed"); + return Ok(()); + }, + _ = self.chain_view.finalized_changed() => { + debug!("finalized changed"); + self.finalized = self.chain_view.get_finalized_cursor().await.change_context(DataStreamError)?; + return self.send_finalize_message(tx, ct).await; + }, + } + } + }; + + if self + .chain_view + .has_group_for_block(next_cursor.number) + .await + { + return self.tick_group(next_cursor, tx, ct).await; + } + + if self + .chain_view + .has_segment_for_block(next_cursor.number) + .await + { + return self.tick_segment(next_cursor, tx, ct).await; + } + + self.tick_single(next_cursor, tx, ct).await + } + + async fn send_heartbeat_message( + &mut self, + tx: &mpsc::Sender, + ct: &CancellationToken, + ) -> Result<(), DataStreamError> { + debug!("tick: send heartbeat message"); + let Some(Ok(permit)) = ct.run_until_cancelled(tx.reserve()).await else { + return Ok(()); + }; + + let heartbeat = Message::Heartbeat(Heartbeat {}); + + permit.send(Ok(StreamDataResponse { + message: Some(heartbeat), + })); + + Ok(()) + } + + async fn send_finalize_message( + &mut self, + tx: &mpsc::Sender, + ct: &CancellationToken, + ) -> Result<(), DataStreamError> { + debug!("tick: send finalize message"); + let Some(Ok(permit)) = ct.run_until_cancelled(tx.reserve()).await else { + return Ok(()); + }; + + let finalize = 
Message::Finalize(Finalize { + cursor: Some(self.finalized.clone().into()), + }); + + permit.send(Ok(StreamDataResponse { + message: Some(finalize), + })); + + Ok(()) + } + + async fn tick_group( + &mut self, + cursor: Cursor, + tx: &mpsc::Sender, + ct: &CancellationToken, + ) -> Result<(), DataStreamError> { + debug!("tick: group"); + + let group_start = self.chain_view.get_group_start_block(cursor.number).await; + let group_end = self.chain_view.get_group_end_block(cursor.number).await; + + let group_start_cursor = Cursor::new_finalized(group_start); + let mut data_bitmap = RoaringBitmap::default(); + + { + let group_bytes = self + .store + .get_group(&group_start_cursor) + .await + .map_err(FileCacheError::Foyer) + .change_context(DataStreamError) + .attach_printable("failed to get group")?; + let group = + unsafe { rkyv::access_unchecked::>(&group_bytes) }; + + for block_filter in self.block_filter.iter() { + for (fragment_id, filters) in block_filter.iter() { + let Some(pos) = group + .index + .indexes + .iter() + .position(|f| f.fragment_id == *fragment_id) + else { + return Err(DataStreamError) + .attach_printable("missing index") + .attach_printable_lazy(|| format!("fragment id: {}", fragment_id)); + }; + + let indexes = &group.index.indexes[pos]; + + for filter in filters { + let rows = filter.filter(indexes).change_context(DataStreamError)?; + data_bitmap |= &rows; + } + } + } + } + + debug!(blocks = ?data_bitmap, "group bitmap"); + + let mut segments = Vec::new(); + let mut current_segment_data = Vec::default(); + + let mut current_segment_start = self.chain_view.get_segment_start_block(group_start).await; + let mut current_segment_end = self.chain_view.get_segment_end_block(group_start).await; + + // let mut prefetch_tasks = JoinSet::new(); + for block_number in data_bitmap.iter() { + let block_number = block_number as u64; + + if block_number > current_segment_end { + let blocks = std::mem::take(&mut current_segment_data); + let current_segment_cursor 
= Cursor::new_finalized(current_segment_start); + + // TODO: prefetch segments. + // self.scanner + // .prefetch_segment(&mut prefetch_tasks, current_segment_cursor.clone()) + // .change_context(DataStreamError)?; + + segments.push((current_segment_cursor, blocks)); + + current_segment_start = self.chain_view.get_segment_start_block(block_number).await; + current_segment_end = self.chain_view.get_segment_end_block(block_number).await; + } + + let CanonicalCursor::Canonical(block_cursor) = self + .chain_view + .get_canonical(block_number) + .await + .change_context(DataStreamError)? + else { + return Err(DataStreamError) + .attach_printable("missing canonical block") + .attach_printable_lazy(|| format!("block number: {}", block_number)); + }; + + let previous_cursor = if block_number == 0 { + None + } else if let CanonicalCursor::Canonical(previous_cursor) = self + .chain_view + .get_canonical(block_number - 1) + .await + .change_context(DataStreamError)? + { + previous_cursor.into() + } else { + None + }; + + current_segment_data.push(SegmentBlock { + cursor: previous_cursor, + end_cursor: block_cursor.clone(), + offset: (block_number - current_segment_start) as usize, + }); + } + + let blocks = std::mem::take(&mut current_segment_data); + let current_segment_cursor = Cursor::new_finalized(current_segment_start); + + // TODO: prefetch segments. 
+ // self.scanner + // .prefetch_segment(&mut prefetch_tasks, current_segment_cursor.clone()) + // .change_context(DataStreamError)?; + + segments.push((current_segment_cursor, blocks)); + + // prefetch_tasks.join_all().await; + + for (segment_cursor, segment_data) in segments { + if ct.is_cancelled() || tx.is_closed() { + return Ok(()); + } + + for block in segment_data { + use apibara_dna_protocol::dna::stream::Cursor as ProtoCursor; + + let fragment_access = FragmentAccess::new_in_segment( + self.store.clone(), + segment_cursor.clone(), + block.offset, + ); + + let proto_cursor: Option = block.cursor.map(Into::into); + let proto_end_cursor: Option = Some(block.end_cursor.clone().into()); + + let mut blocks = Vec::new(); + if self.filter_fragment(&fragment_access, &mut blocks).await? { + let data = Message::Data(Data { + cursor: proto_cursor.clone(), + end_cursor: proto_end_cursor.clone(), + data: blocks, + finality: DataFinality::Finalized as i32, + }); + + let Some(Ok(permit)) = ct.run_until_cancelled(tx.reserve()).await else { + return Ok(()); + }; + + permit.send(Ok(StreamDataResponse { + message: Some(data), + })); + } + } + } + + let CanonicalCursor::Canonical(group_end_cursor) = self + .chain_view + .get_canonical(group_end) + .await + .change_context(DataStreamError)? 
+ else { + return Err(DataStreamError).attach_printable("missing canonical block"); + }; + + self.heartbeat_interval.reset(); + self.current = group_end_cursor.into(); + + Ok(()) + } + + async fn tick_segment( + &mut self, + cursor: Cursor, + tx: &mpsc::Sender, + ct: &CancellationToken, + ) -> Result<(), DataStreamError> { + let mut current = cursor.clone(); + + let segment_size = self.chain_view.get_segment_size().await; + let segment_start = self + .chain_view + .get_segment_start_block(current.number) + .await; + let segment_end = self.chain_view.get_segment_end_block(current.number).await; + + let starting_block_number = cursor.number; + // Notice that we could be starting from anywhere in the segment. + let base_offset = current.number - segment_start; + + let mut blocks = vec![SegmentBlock { + cursor: self.current.clone(), + end_cursor: current.clone(), + offset: base_offset as usize, + }]; + + for i in 1..segment_size { + if current.number >= segment_end { + break; + } + + let CanonicalCursor::Canonical(next_cursor) = self + .chain_view + .get_canonical(starting_block_number + i) + .await + .change_context(DataStreamError)? + else { + return Err(DataStreamError).attach_printable("missing canonical block"); + }; + + blocks.push(SegmentBlock { + cursor: current.clone().into(), + end_cursor: next_cursor.clone(), + offset: (base_offset + i) as usize, + }); + + current = next_cursor; + } + + let segment_cursor = Cursor::new_finalized(segment_start); + + for block in blocks { + use apibara_dna_protocol::dna::stream::Cursor as ProtoCursor; + + let fragment_access = FragmentAccess::new_in_segment( + self.store.clone(), + segment_cursor.clone(), + block.offset, + ); + let proto_cursor: Option = block.cursor.map(Into::into); + let proto_end_cursor: Option = Some(block.end_cursor.clone().into()); + + let mut blocks = Vec::new(); + if self.filter_fragment(&fragment_access, &mut blocks).await? 
{ + let data = Message::Data(Data { + cursor: proto_cursor.clone(), + end_cursor: proto_end_cursor.clone(), + data: blocks, + finality: DataFinality::Finalized as i32, + }); + + let Some(Ok(permit)) = ct.run_until_cancelled(tx.reserve()).await else { + return Ok(()); + }; + + permit.send(Ok(StreamDataResponse { + message: Some(data), + })); + } + } + + self.heartbeat_interval.reset(); + self.current = current.into(); + + Ok(()) + } + + async fn tick_single( + &mut self, + cursor: Cursor, + tx: &mpsc::Sender, + ct: &CancellationToken, + ) -> Result<(), DataStreamError> { + use apibara_dna_protocol::dna::stream::Cursor as ProtoCursor; + + debug!("tick: single block"); + + debug!(cursor = ?self.current, end_cursor = %cursor, "sending data"); + + let proto_cursor: Option = self.current.clone().map(Into::into); + let proto_end_cursor: Option = Some(cursor.clone().into()); + + let finalized = self + .chain_view + .get_finalized_cursor() + .await + .change_context(DataStreamError)?; + + let finality = if finalized.strict_after(&cursor) { + DataFinality::Finalized + } else { + DataFinality::Accepted + }; + + let fragment_access = FragmentAccess::new_in_block(self.store.clone(), cursor.clone()); + + let mut blocks = Vec::new(); + if self.filter_fragment(&fragment_access, &mut blocks).await? 
{ + let data = Message::Data(Data { + cursor: proto_cursor.clone(), + end_cursor: proto_end_cursor.clone(), + data: blocks, + finality: finality.into(), + }); + + let Some(Ok(permit)) = ct.run_until_cancelled(tx.reserve()).await else { + return Ok(()); + }; + + permit.send(Ok(StreamDataResponse { + message: Some(data), + })); + } + + self.heartbeat_interval.reset(); + self.current = Some(cursor); + + Ok(()) + } + + async fn filter_fragment( + &mut self, + fragment_access: &FragmentAccess, + output: &mut Vec, + ) -> Result { + let mut has_data = false; + + for block_filter in self.block_filter.iter() { + let mut data_buffer = BytesMut::with_capacity(DEFAULT_BLOCKS_BUFFER_SIZE); + let mut fragment_matches = BTreeMap::default(); + + let mut joins = BTreeMap::<(FragmentId, FragmentId), FilterMatch>::default(); + + for (fragment_id, filters) in block_filter.iter() { + let mut filter_match = FilterMatch::default(); + + let indexes = fragment_access + .get_fragment_indexes(*fragment_id) + .await + .change_context(DataStreamError) + .attach_printable("failed to get fragment indexes")?; + let indexes = indexes.access().change_context(DataStreamError)?; + + for filter in filters { + let rows = filter.filter(indexes).change_context(DataStreamError)?; + filter_match.add_match(filter.filter_id, &rows); + + for join_with_fragment_id in filter.joins.iter() { + joins + .entry((*fragment_id, *join_with_fragment_id)) + .or_default() + .add_match(filter.filter_id, &rows); + } + } + + if filter_match.is_empty() { + continue; + } + + fragment_matches.insert(*fragment_id, filter_match); + } + + for ((source_fragment_id, target_fragment_id), filter_match) in joins.into_iter() { + // Data is cached so it's fine to read it multiple times. + // We could group by `source_fragment_id` to cleanup the code. 
+ let join_fragment = fragment_access + .get_fragment_joins(source_fragment_id) + .await + .change_context(DataStreamError) + .attach_printable("failed to get join fragment")?; + let join_fragment = join_fragment.access().change_context(DataStreamError)?; + + let Some(target_pos) = join_fragment + .joins + .iter() + .position(|f| f.to_fragment_id == target_fragment_id) + else { + return Err(DataStreamError) + .attach_printable("join fragment not found") + .attach_printable_lazy(|| { + format!("source fragment id: {}", source_fragment_id) + }) + .attach_printable_lazy(|| { + format!("target fragment id: {}", target_fragment_id) + }); + }; + let join = &join_fragment.joins[target_pos]; + + let target_fragment_matches = + fragment_matches.entry(target_fragment_id).or_default(); + + match &join.index { + ArchivedJoinTo::One(inner) => { + for match_ in filter_match.iter() { + if let Some(index) = inner.get(&match_.index) { + for filter_id in match_.filter_ids.iter() { + target_fragment_matches.add_single_match(*filter_id, index); + } + } + } + } + ArchivedJoinTo::Many(inner) => { + for match_ in filter_match.iter() { + if let Some(bitmap) = inner.get(&match_.index) { + for filter_id in match_.filter_ids.iter() { + target_fragment_matches.add_match(*filter_id, &bitmap); + } + } + } + } + } + } + + if block_filter.always_include_header || !fragment_matches.is_empty() { + let header = fragment_access + .get_header_fragment() + .await + .change_context(DataStreamError) + .attach_printable("failed to get header fragment")?; + let header = header.access().change_context(DataStreamError)?; + + prost::encoding::encode_key( + HEADER_FRAGMENT_ID as u32, + prost::encoding::WireType::LengthDelimited, + &mut data_buffer, + ); + prost::encoding::encode_varint(header.data.len() as u64, &mut data_buffer); + data_buffer.put(header.data.as_slice()); + } + + for (fragment_id, filter_match) in fragment_matches.into_iter() { + let Some(fragment_name) = 
self.fragment_id_to_name.get(&fragment_id).cloned() + else { + return Err(DataStreamError) + .attach_printable("unknown fragment id") + .attach_printable_lazy(|| format!("fragment id: {}", fragment_id)); + }; + + let body = fragment_access + .get_body_fragment(fragment_id, fragment_name) + .await + .change_context(DataStreamError) + .attach_printable("failed to get body fragment")?; + let body = body.access().change_context(DataStreamError)?; + + for match_ in filter_match.iter() { + const FILTER_IDS_TAG: u32 = 1; + + let message_bytes = &body.data[match_.index as usize]; + let filter_ids_len = prost::encoding::uint32::encoded_len_packed( + FILTER_IDS_TAG, + &match_.filter_ids, + ); + + prost::encoding::encode_key( + fragment_id as u32, + prost::encoding::WireType::LengthDelimited, + &mut data_buffer, + ); + + prost::encoding::encode_varint( + (filter_ids_len + message_bytes.len()) as u64, + &mut data_buffer, + ); + + prost::encoding::uint32::encode_packed( + FILTER_IDS_TAG, + &match_.filter_ids, + &mut data_buffer, + ); + data_buffer.put(message_bytes.as_slice()); + } + } + + if !data_buffer.is_empty() { + has_data = true; + } + + output.push(data_buffer.freeze()); + } + + Ok(has_data) + } +} + +impl error_stack::Context for DataStreamError {} + +impl std::fmt::Display for DataStreamError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "data stream error") + } +} diff --git a/common/src/dbg/error.rs b/common/src/dbg/error.rs new file mode 100644 index 00000000..e3e11bb1 --- /dev/null +++ b/common/src/dbg/error.rs @@ -0,0 +1,10 @@ +#[derive(Debug)] +pub struct DebugCommandError; + +impl error_stack::Context for DebugCommandError {} + +impl std::fmt::Display for DebugCommandError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "debug command error") + } +} diff --git a/common/src/dbg/index.rs b/common/src/dbg/index.rs new file mode 100644 index 00000000..d50690fa --- /dev/null +++ 
b/common/src/dbg/index.rs @@ -0,0 +1,143 @@ +use std::{path::PathBuf, time::Instant}; + +use byte_unit::Byte; +use clap::Subcommand; +use error_stack::{Result, ResultExt}; +use tracing::info; + +use crate::{ + fragment::{ArchivedIndexFragment, FragmentId, IndexGroupFragment}, + index::Index, + segment::{ArchivedFragmentData, Segment}, +}; + +use super::error::DebugCommandError; + +#[derive(Subcommand, Debug)] +pub enum DebugIndexCommand { + /// Dump the content of an index. + TextDump { + /// Path to the index file. + path: PathBuf, + /// Print the fragment at the given offset. + #[arg(long)] + offset: Option, + /// Print the fragment with the given fragment ID. + #[arg(long)] + fragment_id: Option, + }, +} + +impl DebugIndexCommand { + pub async fn run(self) -> Result<(), DebugCommandError> { + match self { + DebugIndexCommand::TextDump { + path, + offset, + fragment_id, + } => { + info!(path = ?path, "reading index"); + + let bytes = std::fs::read(&path) + .change_context(DebugCommandError) + .attach_printable("failed to read index file")?; + + let start = Instant::now(); + let segment = rkyv::access::< + rkyv::Archived>, + rkyv::rancor::Error, + >(&bytes) + .change_context(DebugCommandError) + .attach_printable("failed to deserialize index segment")?; + let elapsed = start.elapsed(); + + let first_block = rkyv::deserialize::<_, rkyv::rancor::Error>(&segment.first_block) + .change_context(DebugCommandError) + .attach_printable("failed to deserialize first block")?; + + info!(first_block = %first_block, time = ?elapsed, "segment read"); + + if let Some(offset) = offset { + let data = segment + .data + .get(offset) + .ok_or(DebugCommandError) + .attach_printable("index at offset not found")?; + index_group_fragment_dump(offset, &fragment_id, data)?; + } else { + for (offset, data) in segment.data.iter().enumerate() { + index_group_fragment_dump(offset, &fragment_id, data)?; + } + } + + Ok(()) + } + } + } +} + +fn index_group_fragment_dump( + offset: usize, + 
fragment_id: &Option, + data: &ArchivedFragmentData, +) -> Result<(), DebugCommandError> { + let cursor = rkyv::deserialize::<_, rkyv::rancor::Error>(&data.cursor) + .change_context(DebugCommandError) + .attach_printable("failed to deserialize cursor")?; + info!(offset = offset, cursor = %cursor, "fragment data"); + + if let Some(fragment_id) = fragment_id { + let index = data + .data + .indexes + .iter() + .find(|f| f.fragment_id == *fragment_id) + .ok_or(DebugCommandError) + .attach_printable("fragment index not found")?; + index_fragment_dump(index)?; + } else { + for index in data.data.indexes.iter() { + index_fragment_dump(index)?; + } + } + + Ok(()) +} + +fn index_fragment_dump(fragment: &ArchivedIndexFragment) -> Result<(), DebugCommandError> { + let start = Instant::now(); + let fragment = rkyv::deserialize::<_, rkyv::rancor::Error>(fragment) + .change_context(DebugCommandError) + .attach_printable("failed to deserialize fragment")?; + let elapsed = start.elapsed(); + info!( + fragment_id = fragment.fragment_id, + range_start = fragment.range_start, + range_len = fragment.range_len, + indexes_len = fragment.indexes.len(), + time = ?elapsed, + "fragment index" + ); + + for index in fragment.indexes.iter() { + match &index.index { + Index::Bitmap(bitmap) => { + let keys = bitmap.keys().collect::>(); + let first = keys.first(); + let last = keys.last(); + let bitmap_size = bitmap.iter().map(|kv| kv.1.len() as u64).sum::(); + let bitmap_size = format!("{:#}", Byte::from_u64(bitmap_size)); + info!( + id = index.index_id, + keys = keys.len(), + bitmap_size, + first = ?first, + last = ?last, + "bitmap index" + ); + } + } + } + + Ok(()) +} diff --git a/common/src/dbg/mod.rs b/common/src/dbg/mod.rs new file mode 100644 index 00000000..944c56cb --- /dev/null +++ b/common/src/dbg/mod.rs @@ -0,0 +1,5 @@ +mod error; +mod index; + +pub use self::error::DebugCommandError; +pub use self::index::DebugIndexCommand; diff --git a/common/src/file_cache.rs 
b/common/src/file_cache.rs new file mode 100644 index 00000000..0fd07b2d --- /dev/null +++ b/common/src/file_cache.rs @@ -0,0 +1,209 @@ +use std::{path::PathBuf, str::FromStr, sync::Arc}; + +use bytes::Bytes; +use clap::Args; +use error_stack::{Result, ResultExt}; +use foyer::{ + AdmissionPicker, AdmitAllPicker, CacheEntry, Compression, DirectFsDeviceOptions, Engine, + HybridCache, HybridCacheBuilder, HybridFetch, LargeEngineOptions, RateLimitPicker, RecoverMode, + RuntimeConfig, TokioRuntimeConfig, +}; + +#[derive(Debug)] +pub enum FileCacheError { + Config, + Foyer(anyhow::Error), +} + +/// A cache with the content of remote files. +pub type FileCache = HybridCache; + +pub type FileFetch = HybridFetch; + +pub type CachedFile = CacheEntry; + +#[derive(Args, Debug)] +pub struct FileCacheArgs { + /// Where to store cached data. + #[clap(long = "cache.dir", env = "DNA_CACHE_DIR")] + pub cache_dir: Option, + /// Maximum size of the cache on disk. + #[clap( + long = "cache.size-disk", + env = "DNA_CACHE_SIZE_DISK", + default_value = "10Gi" + )] + pub cache_size_disk: String, + /// Size of the direct fs files. + #[clap( + long = "cache.file-size", + env = "DNA_CACHE_FILE_SIZE", + default_value = "1Gi" + )] + pub cache_file_size: String, + /// Maximum size of the cache in memory. + #[clap( + long = "cache.size-memory", + env = "DNA_CACHE_SIZE_MEMORY", + default_value = "2Gi" + )] + pub cache_size_memory: String, + /// Cache worker threads for reading. + #[clap( + long = "cache.runtime-read-threads", + env = "DNA_CACHE_RUNTIME_READ_THREADS", + default_value = "4" + )] + pub cache_runtime_read_threads: usize, + /// Cache worker threads for writing. + #[clap( + long = "cache.runtime-write-threads", + env = "DNA_CACHE_RUNTIME_WRITE_THREADS", + default_value = "4" + )] + pub cache_runtime_write_threads: usize, + /// Set how fast items can be inserted into the cache. 
+ #[clap( + long = "cache.admission-rate-limit", + env = "DNA_CACHE_ADMISSION_RATE_LIMIT" + )] + pub cache_admission_rate_limit: Option, + /// Set the compression algorithm. + /// + /// One of: none, lz4, zstd. + #[clap( + long = "cache.compression", + env = "DNA_CACHE_COMPRESSION", + default_value = "none" + )] + pub cache_compression: String, + /// Enable `sync` after writes. + #[clap(long = "cache.flush", env = "DNA_CACHE_FLUSH")] + pub cache_flush: bool, + /// Set the flusher count. + #[clap( + long = "cache.flusher-count", + env = "DNA_CACHE_FLUSHER_COUNT", + default_value = "2" + )] + pub cache_flusher_count: usize, + /// Set the flush buffer pool size. + #[clap( + long = "cache.flush-buffer-pool-size", + env = "DNA_CACHE_FLUSH_BUFFER_POOL_SIZE", + default_value = "1Gi" + )] + pub cache_flush_buffer_pool_size: String, +} + +impl FileCacheArgs { + pub async fn to_file_cache(&self) -> Result { + let cache_dir = if let Some(cache_dir) = &self.cache_dir { + cache_dir + .parse::() + .change_context(FileCacheError::Config) + .attach_printable("failed to parse cache dir") + .attach_printable_lazy(|| format!("cache dir: {}", cache_dir))? + } else { + dirs::data_local_dir() + .ok_or(FileCacheError::Config) + .attach_printable("failed to get data dir")? + .join("dna") + }; + + let max_size_memory_bytes = byte_unit::Byte::from_str(&self.cache_size_memory) + .change_context(FileCacheError::Config) + .attach_printable("failed to parse in memory cache size") + .attach_printable_lazy(|| format!("cache size: {}", self.cache_size_memory))? + .as_u64(); + + let max_size_disk_bytes = byte_unit::Byte::from_str(&self.cache_size_disk) + .change_context(FileCacheError::Config) + .attach_printable("failed to parse on disk cache size") + .attach_printable_lazy(|| format!("cache size: {}", self.cache_size_disk))? 
+ .as_u64(); + + let file_size = byte_unit::Byte::from_str(&self.cache_file_size) + .change_context(FileCacheError::Config) + .attach_printable("failed to parse cache file size") + .attach_printable_lazy(|| format!("file size: {}", self.cache_file_size))? + .as_u64(); + + let admission_picker: Arc> = + if let Some(rate_limit) = self.cache_admission_rate_limit.as_ref() { + let rate_limit = byte_unit::Byte::from_str(rate_limit) + .change_context(FileCacheError::Config) + .attach_printable("failed to parse admission rate limit") + .attach_printable_lazy(|| format!("rate limit: {}", rate_limit))? + .as_u64(); + Arc::new(RateLimitPicker::new(rate_limit as usize)) + } else { + Arc::new(AdmitAllPicker::default()) + }; + + let compression = match self.cache_compression.as_str() { + "none" => Compression::None, + "lz4" => Compression::Lz4, + "zstd" => Compression::Zstd, + _ => Err(FileCacheError::Config) + .attach_printable("failed to parse compression") + .attach_printable_lazy(|| format!("compression: {}", self.cache_compression))?, + }; + + let flush_buffer_pool_size = byte_unit::Byte::from_str(&self.cache_flush_buffer_pool_size) + .change_context(FileCacheError::Config) + .attach_printable("failed to parse flush buffer pool size") + .attach_printable_lazy(|| { + format!( + "flush buffer pool size: {}", + self.cache_flush_buffer_pool_size + ) + })? 
+ .as_u64(); + + let builder = HybridCacheBuilder::new() + .with_name("dna.cache") + .memory(max_size_memory_bytes as usize) + .with_weighter(|_: &String, bytes: &Bytes| bytes.len()) + .storage(Engine::Large) + .with_compression(compression) + .with_admission_picker(admission_picker) + .with_runtime_config(RuntimeConfig::Separated { + read_runtime_config: TokioRuntimeConfig { + worker_threads: self.cache_runtime_read_threads, + max_blocking_threads: self.cache_runtime_read_threads * 2, + }, + write_runtime_config: TokioRuntimeConfig { + worker_threads: self.cache_runtime_write_threads, + max_blocking_threads: self.cache_runtime_write_threads * 2, + }, + }) + .with_large_object_disk_cache_options( + LargeEngineOptions::new() + .with_flushers(self.cache_flusher_count) + .with_buffer_pool_size(flush_buffer_pool_size as usize), + ) + .with_flush(self.cache_flush) + .with_device_options( + DirectFsDeviceOptions::new(cache_dir) + .with_capacity(max_size_disk_bytes as usize) + .with_file_size(file_size as usize), + ) + .with_recover_mode(RecoverMode::Quiet); + + let cache = builder.build().await.map_err(FileCacheError::Foyer)?; + + Ok(cache) + } +} + +impl error_stack::Context for FileCacheError {} + +impl std::fmt::Display for FileCacheError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + FileCacheError::Config => write!(f, "file cache builder error: config error"), + FileCacheError::Foyer(err) => write!(f, "file cache builder error: {}", err), + } + } +} diff --git a/common/src/fragment.rs b/common/src/fragment.rs new file mode 100644 index 00000000..5933ecd1 --- /dev/null +++ b/common/src/fragment.rs @@ -0,0 +1,99 @@ +//! Block fragments contain pieces of block data. 
+ +use rkyv::{Archive, Deserialize, Serialize}; + +use crate::{index, join}; + +pub const INDEX_FRAGMENT_ID: FragmentId = 0; +pub const INDEX_FRAGMENT_NAME: &str = "index"; + +pub const JOIN_FRAGMENT_ID: FragmentId = u8::MAX; +pub const JOIN_FRAGMENT_NAME: &str = "join"; + +pub const HEADER_FRAGMENT_ID: FragmentId = 1; +pub const HEADER_FRAGMENT_NAME: &str = "header"; + +pub type FragmentId = u8; + +pub type IndexId = u8; + +/// Information about a fragment. +#[derive(Debug, Clone)] +pub struct FragmentInfo { + /// The fragment's unique ID. + pub fragment_id: FragmentId, + /// The fragment's name. + pub name: String, +} + +/// A pre-serialized protobuf message without the `filter_ids` field. +pub type SerializedProto = Vec; + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct Block { + pub header: HeaderFragment, + pub index: IndexGroupFragment, + pub join: JoinGroupFragment, + pub body: Vec, +} + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct IndexGroupFragment { + pub indexes: Vec, +} + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct JoinGroupFragment { + pub joins: Vec, +} + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct HeaderFragment { + pub data: SerializedProto, +} + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct IndexFragment { + pub fragment_id: FragmentId, + pub range_start: u32, + pub range_len: u32, + pub indexes: Vec, +} + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct JoinFragment { + pub fragment_id: FragmentId, + pub joins: Vec, +} + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct BodyFragment { + /// The fragment's unique ID. + pub fragment_id: FragmentId, + /// The fragment's name. + pub name: String, + /// The fragment's data. 
+ pub data: Vec, +} + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct Index { + pub index_id: IndexId, + pub index: index::Index, +} + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct Join { + pub to_fragment_id: FragmentId, + pub index: join::JoinTo, +} + +impl IndexGroupFragment { + pub fn is_empty(&self) -> bool { + self.indexes.is_empty() + } + + pub fn len(&self) -> usize { + self.indexes.len() + } +} diff --git a/common/src/index.rs b/common/src/index.rs new file mode 100644 index 00000000..72c43319 --- /dev/null +++ b/common/src/index.rs @@ -0,0 +1,154 @@ +use std::{collections::BTreeMap, ops::RangeBounds}; + +use rkyv::{Archive, Deserialize, Serialize}; +use roaring::RoaringBitmap; + +/// Fixed-size scalar values. +#[derive(Archive, Serialize, Deserialize, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum ScalarValue { + /// A null or empty value. + Null, + /// A boolean. + Bool(bool), + /// A signed integer with 32 bits. + Int32(i32), + /// An unsigned integer with 8 bits. + Uint8(u8), + /// An unsigned integer with 16 bits. + Uint16(u16), + /// An unsigned integer with 32 bits. + Uint32(u32), + /// An unsigned integer with 64 bits. + Uint64(u64), + /// A byte array with 20 elements. + B160([u8; 20]), + /// A byte array with 32 elements. + B256([u8; 32]), + /// A byte array with 48 elements. + B384([u8; 48]), +} + +/// Map scalar values to bitmaps. +#[derive(Archive, Serialize, Deserialize, Debug, Clone, Default)] +pub struct BitmapIndex { + keys: Vec, + values: Vec>, +} + +#[derive(Debug, Default)] +pub struct BitmapIndexBuilder(BTreeMap); + +impl BitmapIndexBuilder { + /// Insert a value in the index. + pub fn insert(&mut self, key: ScalarValue, value: u32) { + self.0.entry(key).or_default().insert(value); + } + + /// Insert a range of values in the index. 
+ pub fn insert_range(&mut self, key: ScalarValue, range: R) + where + R: RangeBounds, + { + self.0.entry(key).or_default().insert_range(range); + } + + pub fn build(&self) -> std::io::Result { + self.0 + .iter() + .try_fold(BitmapIndex::default(), |mut index, (key, bitmap)| { + index.keys.push(key.clone()); + let mut out = Vec::new(); + bitmap.serialize_into(&mut out)?; + index.values.push(out); + + Ok(index) + }) + } +} + +impl BitmapIndex { + pub fn keys(&self) -> impl Iterator { + self.keys.iter() + } + + pub fn iter(&self) -> impl Iterator)> { + self.keys.iter().zip(self.values.iter()) + } +} + +impl ArchivedBitmapIndex { + pub fn get(&self, key: &ScalarValue) -> Option { + let pos = self + .keys + .binary_search_by(|entry| cmp_scalar_value(entry, key)) + .ok()?; + + let value = &self.values[pos]; + RoaringBitmap::deserialize_unchecked_from(value.as_slice()) + .expect("failed to deserialize bitmap") + .into() + } +} + +fn cmp_scalar_value(a: &ArchivedScalarValue, b: &ScalarValue) -> std::cmp::Ordering { + match (a, b) { + (ArchivedScalarValue::Null, ScalarValue::Null) => std::cmp::Ordering::Equal, + (ArchivedScalarValue::Bool(a), ScalarValue::Bool(b)) => a.cmp(b), + (ArchivedScalarValue::Int32(a), ScalarValue::Int32(b)) => a.to_native().cmp(b), + (ArchivedScalarValue::Uint8(a), ScalarValue::Uint8(b)) => a.cmp(b), + (ArchivedScalarValue::Uint16(a), ScalarValue::Uint16(b)) => a.to_native().cmp(b), + (ArchivedScalarValue::Uint32(a), ScalarValue::Uint32(b)) => a.to_native().cmp(b), + (ArchivedScalarValue::Uint64(a), ScalarValue::Uint64(b)) => a.to_native().cmp(b), + (ArchivedScalarValue::B160(a), ScalarValue::B160(b)) => a.cmp(b), + (ArchivedScalarValue::B256(a), ScalarValue::B256(b)) => a.cmp(b), + (ArchivedScalarValue::B384(a), ScalarValue::B384(b)) => a.cmp(b), + _ => std::cmp::Ordering::Greater, + } +} + +/// Data index. +#[derive(Debug, Archive, Serialize, Deserialize)] +pub enum Index { + /// An index containing bitmap values. 
+ Bitmap(BitmapIndex), +} + +impl std::fmt::Debug for ScalarValue { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ScalarValue::Null => write!(f, "Null"), + ScalarValue::Bool(v) => write!(f, "Bool({})", v), + ScalarValue::Int32(v) => write!(f, "Int32({})", v), + ScalarValue::Uint8(v) => write!(f, "Uint8({})", v), + ScalarValue::Uint16(v) => write!(f, "Uint16({})", v), + ScalarValue::Uint32(v) => write!(f, "Uint32({})", v), + ScalarValue::Uint64(v) => write!(f, "Uint64({})", v), + ScalarValue::B160(v) => write!(f, "B160(0x{})", hex::encode(v)), + ScalarValue::B256(v) => write!(f, "B256(0x{})", hex::encode(v)), + ScalarValue::B384(v) => write!(f, "B384({})", hex::encode(v)), + } + } +} + +impl std::fmt::Debug for ArchivedScalarValue { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ArchivedScalarValue::Null => write!(f, "ArchivedNull"), + ArchivedScalarValue::Bool(v) => write!(f, "ArchivedBool({})", v), + ArchivedScalarValue::Int32(v) => write!(f, "ArchivedInt32({})", v), + ArchivedScalarValue::Uint8(v) => write!(f, "ArchivedUint8({})", v), + ArchivedScalarValue::Uint16(v) => write!(f, "ArchivedUint16({})", v), + ArchivedScalarValue::Uint32(v) => write!(f, "ArchivedUint32({})", v), + ArchivedScalarValue::Uint64(v) => write!(f, "ArchivedUint64({})", v), + ArchivedScalarValue::B160(v) => write!(f, "ArchivedB160(0x{})", hex::encode(v)), + ArchivedScalarValue::B256(v) => write!(f, "ArchivedB256(0x{})", hex::encode(v)), + ArchivedScalarValue::B384(v) => write!(f, "ArchivedB384({})", hex::encode(v)), + } + } +} + +impl From for Index { + fn from(value: BitmapIndex) -> Self { + Index::Bitmap(value) + } +} diff --git a/common/src/ingestion/cli.rs b/common/src/ingestion/cli.rs new file mode 100644 index 00000000..f1d83229 --- /dev/null +++ b/common/src/ingestion/cli.rs @@ -0,0 +1,59 @@ +use clap::Args; + +#[derive(Args, Debug)] +pub struct IngestionArgs { + /// Whether to run the ingestion 
service. + #[clap(long = "ingestion.enabled", env = "DNA_INGESTION_ENABLED")] + pub ingestion_enabled: bool, + /// How many concurrent ingestion tasks to run. + #[clap( + long = "ingestion.max-concurrent-tasks", + env = "DNA_INGESTION_MAX_CONCURRENT_TASKS", + default_value = "100" + )] + pub ingestion_max_concurrent_tasks: usize, + /// How many blocks each chain segment contains. + #[clap( + long = "ingestion.chain-segment-size", + env = "DNA_INGESTION_CHAIN_SEGMENT_SIZE", + default_value = "10000" + )] + pub ingestion_chain_segment_size: usize, + /// Override the ingestion starting block. + #[clap( + long = "ingestion.dangerously-override-starting-block", + env = "DNA_INGESTION_DANGEROUSLY_OVERRIDE_STARTING_BLOCK" + )] + pub ingestion_dangerously_override_starting_block: Option, + /// How often to refresh the head block, in seconds. + #[clap( + long = "ingestion.head-refresh-interval", + env = "DNA_INGESTION_HEAD_REFRESH_INTERVAL", + default_value = "3" + )] + pub ingestion_head_refresh_interval: u64, + /// How often to refresh the finalized block, in seconds. 
+ #[clap( + long = "ingestion.finalized-refresh-interval", + env = "DNA_INGESTION_FINALIZED_REFRESH_INTERVAL", + default_value = "30" + )] + pub ingestion_finalized_refresh_interval: u64, +} + +impl IngestionArgs { + pub fn to_ingestion_service_options(&self) -> super::IngestionServiceOptions { + super::IngestionServiceOptions { + max_concurrent_tasks: self.ingestion_max_concurrent_tasks, + chain_segment_size: self.ingestion_chain_segment_size, + chain_segment_upload_offset_size: 100, + override_starting_block: self.ingestion_dangerously_override_starting_block, + head_refresh_interval: std::time::Duration::from_secs( + self.ingestion_head_refresh_interval, + ), + finalized_refresh_interval: std::time::Duration::from_secs( + self.ingestion_finalized_refresh_interval, + ), + } + } +} diff --git a/common/src/ingestion/error.rs b/common/src/ingestion/error.rs new file mode 100644 index 00000000..7090f2e1 --- /dev/null +++ b/common/src/ingestion/error.rs @@ -0,0 +1,56 @@ +use error_stack::Report; + +#[derive(Debug, Clone)] +pub enum IngestionError { + BlockNotFound, + RpcRequest, + CanonicalChainStoreRequest, + BlockStoreRequest, + StateClientRequest, + LockKeepAlive, + Options, + BadHash, + Model, + Indexing, +} + +pub trait IngestionErrorExt { + fn is_block_not_found(&self) -> bool; +} + +impl IngestionErrorExt for Report { + fn is_block_not_found(&self) -> bool { + matches!(self.current_context(), IngestionError::BlockNotFound) + } +} + +impl error_stack::Context for IngestionError {} + +impl std::fmt::Display for IngestionError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + IngestionError::BlockNotFound => write!(f, "ingestion error: block not found"), + IngestionError::RpcRequest => write!(f, "ingestion error: rpc request error"), + IngestionError::BlockStoreRequest => { + write!(f, "ingestion error: block store request error") + } + IngestionError::CanonicalChainStoreRequest => { + write!(f, "ingestion error: canonical 
chain store request error") + } + IngestionError::StateClientRequest => { + write!(f, "ingestion error: state client request error") + } + IngestionError::BadHash => write!(f, "ingestion error: bad hash"), + IngestionError::Model => write!(f, "ingestion error: conversion error"), + IngestionError::LockKeepAlive => { + write!(f, "ingestion error: failed to keep lock alive") + } + IngestionError::Options => { + write!(f, "ingestion error: invalid options") + } + IngestionError::Indexing => { + write!(f, "ingestion error: indexing error") + } + } + } +} diff --git a/common/src/ingestion/mod.rs b/common/src/ingestion/mod.rs new file mode 100644 index 00000000..4860e7f7 --- /dev/null +++ b/common/src/ingestion/mod.rs @@ -0,0 +1,109 @@ +mod cli; +mod error; +mod service; +mod state_client; + +use apibara_etcd::{EtcdClient, LockOptions}; +use error_stack::{Result, ResultExt}; +use tokio_util::sync::CancellationToken; +use tracing::{error, info, warn}; + +use crate::file_cache::FileCache; +use crate::object_store::ObjectStore; +use crate::options_store::OptionsStore; + +pub use self::cli::IngestionArgs; +pub use self::error::{IngestionError, IngestionErrorExt}; +pub use self::service::{BlockIngestion, IngestionService, IngestionServiceOptions}; +pub use self::state_client::{ + IngestionStateClient, IngestionStateClientError, IngestionStateUpdate, FINALIZED_KEY, + INGESTED_KEY, INGESTION_PREFIX_KEY, STARTING_BLOCK_KEY, +}; + +pub async fn ingestion_service_loop( + ingestion: I, + etcd_client: EtcdClient, + object_store: ObjectStore, + file_cache: FileCache, + options: IngestionServiceOptions, + ct: CancellationToken, +) -> Result<(), IngestionError> +where + I: BlockIngestion + Send + Sync + 'static, +{ + let mut lock_client = etcd_client.lock_client(LockOptions::default()); + + while !ct.is_cancelled() { + info!("acquiring ingestion lock"); + + let Some(mut lock) = lock_client + .lock("ingestion/lock", ct.clone()) + .await + .change_context(IngestionError::LockKeepAlive)? 
+ else { + warn!("failed to acquire ingestion lock"); + break; + }; + + info!("ingestion lock acquired"); + + // Compare the current options with the stored options. + // If they differ, return an error. + let mut options_store = OptionsStore::new(&etcd_client); + if let Some(chain_segment_size) = options_store + .get_chain_segment_size() + .await + .change_context(IngestionError::Options) + .attach_printable("failed to get chain segment size options")? + { + if chain_segment_size != options.chain_segment_size { + return Err(IngestionError::Options) + .attach_printable("chain segment size changed") + .attach_printable_lazy(|| { + format!("stored chain segment size: {}", chain_segment_size) + }) + .attach_printable_lazy(|| { + format!("new chain segment size: {}", options.chain_segment_size) + }); + } + } else { + options_store + .set_chain_segment_size(options.chain_segment_size) + .await + .change_context(IngestionError::Options) + .attach_printable("failed to set chain segment size options")?; + } + + let ingestion_service = IngestionService::new( + ingestion.clone(), + etcd_client.clone(), + object_store.clone(), + file_cache.clone(), + options.clone(), + ); + + match ingestion_service.start(&mut lock, ct.clone()).await { + Ok(_) => { + lock_client + .unlock(lock) + .await + .change_context(IngestionError::LockKeepAlive)?; + info!("ingestion lock released"); + break; + } + Err(err) => { + error!(error = ?err, "ingestion service error"); + lock_client + .unlock(lock) + .await + .change_context(IngestionError::LockKeepAlive)?; + info!("ingestion lock released"); + + // TODO: configurable with exponential backoff. 
+ tokio::time::sleep(std::time::Duration::from_secs(10)).await; + } + } + } + + Ok(()) +} diff --git a/common/src/ingestion/service.rs b/common/src/ingestion/service.rs new file mode 100644 index 00000000..35c6599e --- /dev/null +++ b/common/src/ingestion/service.rs @@ -0,0 +1,508 @@ +use std::{future::Future, sync::Arc, time::Duration}; + +use apibara_etcd::{EtcdClient, Lock}; +use error_stack::{Result, ResultExt}; +use futures::{stream::FuturesOrdered, StreamExt}; +use tokio::{task::JoinHandle, time::Interval}; +use tokio_util::sync::CancellationToken; +use tracing::{debug, field, info, trace, Instrument}; + +use crate::{ + block_store::BlockStoreWriter, + chain::{BlockInfo, CanonicalChainBuilder}, + chain_store::ChainStore, + file_cache::FileCache, + fragment::Block, + object_store::ObjectStore, + Cursor, +}; + +use super::{error::IngestionError, state_client::IngestionStateClient}; + +pub trait BlockIngestion: Clone { + fn get_head_cursor(&self) -> impl Future> + Send; + fn get_finalized_cursor(&self) -> impl Future> + Send; + fn get_block_info_by_number( + &self, + block_number: u64, + ) -> impl Future> + Send; + + fn ingest_block_by_number( + &self, + block_number: u64, + ) -> impl Future> + Send; +} + +type IngestionTaskHandle = JoinHandle>; + +#[derive(Clone, Debug)] +pub struct IngestionServiceOptions { + /// Maximum number of concurrent ingestion tasks. + pub max_concurrent_tasks: usize, + /// How many blocks in a single chain segment. + pub chain_segment_size: usize, + /// How many finalized blocks to wait before uploading a chain segment. + pub chain_segment_upload_offset_size: usize, + /// Override the ingestion starting block. + pub override_starting_block: Option, + /// How often to refresh the head block. + pub head_refresh_interval: Duration, + /// How often to refresh the finalized block. 
+ pub finalized_refresh_interval: Duration, +} + +pub struct IngestionService +where + I: BlockIngestion, +{ + options: IngestionServiceOptions, + ingestion: IngestionInner, + state_client: IngestionStateClient, + chain_store: ChainStore, + chain_builder: CanonicalChainBuilder, + task_queue: FuturesOrdered, +} + +/// Wrap ingestion-related clients so we can clone them and push them to the task queue. +#[derive(Clone)] +struct IngestionInner +where + I: BlockIngestion, +{ + block_store: BlockStoreWriter, + ingestion: Arc, +} + +enum IngestionState { + Ingest(IngestState), + Recover, +} + +struct IngestState { + finalized: Cursor, + head: Cursor, + queued_block_number: u64, + head_refresh_interval: Interval, + finalized_refresh_interval: Interval, +} + +/// What action to take when starting ingestion. +enum IngestionStartAction { + /// Resume ingestion from the given cursor (cursor already ingested). + Resume(Cursor), + /// Start ingestion from the given block number (inclusive). + Start(u64), +} + +impl IngestionService +where + I: BlockIngestion + Send + Sync + 'static, +{ + pub fn new( + ingestion: I, + etcd_client: EtcdClient, + object_store: ObjectStore, + file_cache: FileCache, + options: IngestionServiceOptions, + ) -> Self { + let chain_store = ChainStore::new(object_store.clone(), file_cache); + let block_store = BlockStoreWriter::new(object_store); + let state_client = IngestionStateClient::new(&etcd_client); + + Self { + options, + ingestion: IngestionInner { + ingestion: ingestion.into(), + block_store, + }, + state_client, + chain_store, + chain_builder: CanonicalChainBuilder::new(), + task_queue: FuturesOrdered::new(), + } + } + + pub async fn start( + mut self, + lock: &mut Lock, + ct: CancellationToken, + ) -> Result<(), IngestionError> { + let mut state = self.initialize().await?; + + loop { + if ct.is_cancelled() { + return Ok(()); + } + + let tick_span = tracing::info_span!( + "ingestion_tick", + state_name = state.state_name(), + head = 
field::Empty, + finalized = field::Empty, + task_queue_size = field::Empty, + action = field::Empty, + ); + + lock.keep_alive() + .await + .change_context(IngestionError::LockKeepAlive)?; + + state = async { + match state { + IngestionState::Ingest(inner_state) => { + self.tick_ingest(inner_state, ct.clone()).await + } + IngestionState::Recover => { + // TODO: implement recovery. + Err(IngestionError::Model).attach_printable("chain is in recovery state") + } + } + } + .instrument(tick_span) + .await?; + } + } + + #[tracing::instrument( + name = "ingestion_init", + skip_all, + err(Debug), + fields(head, finalized, starting_block) + )] + async fn initialize(&mut self) -> Result { + let head = self.ingestion.get_head_cursor().await?; + let finalized = self.ingestion.get_finalized_cursor().await?; + + let current_span = tracing::Span::current(); + + current_span.record("head", head.number); + current_span.record("finalized", finalized.number); + + self.state_client + .put_finalized(finalized.number) + .await + .change_context(IngestionError::StateClientRequest)?; + + match self.get_starting_cursor().await? { + IngestionStartAction::Start(starting_block) => { + // Ingest genesis block here so that the rest of the body is the same + // as if we were resuming ingestion. 
+ info!( + starting_block = starting_block, + "starting ingestion from genesis block" + ); + + let block_info = self + .ingestion + .ingest_block_by_number(starting_block) + .await?; + + let starting_cursor = block_info.cursor(); + + self.chain_builder + .grow(block_info) + .change_context(IngestionError::Model)?; + + current_span.record("starting_block", starting_block); + + info!(cursor = %starting_cursor, "uploaded genesis block"); + + Ok(IngestionState::Ingest(IngestState { + queued_block_number: starting_cursor.number, + finalized, + head, + head_refresh_interval: tokio::time::interval( + self.options.head_refresh_interval, + ), + finalized_refresh_interval: tokio::time::interval( + self.options.finalized_refresh_interval, + ), + })) + } + IngestionStartAction::Resume(starting_cursor) => { + current_span.record("starting_block", starting_cursor.number); + + Ok(IngestionState::Ingest(IngestState { + queued_block_number: starting_cursor.number, + finalized, + head, + head_refresh_interval: tokio::time::interval( + self.options.head_refresh_interval, + ), + finalized_refresh_interval: tokio::time::interval( + self.options.finalized_refresh_interval, + ), + })) + } + } + } + + /// A single tick of ingestion. + /// + /// This is equivalent to `viewStep` in the Quint spec. + async fn tick_ingest( + &mut self, + mut state: IngestState, + ct: CancellationToken, + ) -> Result { + let current_span = tracing::Span::current(); + + current_span.record("head", state.head.number); + current_span.record("finalized", state.finalized.number); + current_span.record("task_queue_size", self.task_queue.len()); + + tokio::select! 
{ + biased; + + _ = ct.cancelled() => Ok(IngestionState::Ingest(state)), + + _ = state.finalized_refresh_interval.tick() => { + current_span.record("action", "refresh_finalized"); + + let finalized = self.ingestion.get_finalized_cursor().await.change_context(IngestionError::RpcRequest) + .attach_printable("failed to refresh finalized cursor")?; + + if state.finalized.number > finalized.number { + return Err(IngestionError::Model) + .attach_printable("the new finalized cursor is behind the old one") + .attach_printable("this should never happen"); + } + + if state.finalized == finalized { + return Ok(IngestionState::Ingest(state)); + } + + info!(cursor = %finalized, "refreshed finalized cursor"); + + self.state_client.put_finalized(finalized.number).await.change_context(IngestionError::StateClientRequest)?; + + Ok(IngestionState::Ingest(IngestState { + finalized, + ..state + })) + } + + _ = state.head_refresh_interval.tick() => { + current_span.record("action", "refresh_head"); + + let head = self.ingestion.get_head_cursor().await.change_context(IngestionError::RpcRequest) + .attach_printable("failed to refresh head cursor")?; + + if state.head == head { + return Ok(IngestionState::Ingest(state)); + } + + if state.head.number > head.number { + info!(old_head = %state.head, new_head = %head, "reorg detected"); + return Ok(IngestionState::Recover); + } + + if state.head.number == head.number && state.head.hash != head.hash { + return Ok(IngestionState::Recover); + } + + info!(cursor = %head, "refreshed head cursor"); + + let mut block_number = state.queued_block_number; + while self.can_push_task() { + if block_number + 1 > state.head.number { + break; + } + + block_number += 1; + trace!(block_number, "pushing finalized ingestion task"); + self.push_ingest_block_by_number(block_number); + } + + Ok(IngestionState::Ingest(IngestState { + head, + queued_block_number: block_number, + ..state + })) + } + + join_result = self.task_queue.next(), if 
!self.task_queue.is_empty() => { + current_span.record("action", "finish_ingestion"); + + if let Some(join_result) = join_result { + let block_info = join_result + .change_context(IngestionError::RpcRequest)? + .attach_printable("failed to join ingestion task") + .change_context(IngestionError::RpcRequest) + .attach_printable("failed to ingest block")?; + + info!(block = %block_info.cursor(), "ingested block"); + + // Always upload recent segment if the block is non-finalized. + let mut should_upload_recent_segment = block_info.number >= state.finalized.number; + + if !self.chain_builder.can_grow(&block_info) { + return Ok(IngestionState::Recover); + } + + self.chain_builder.grow(block_info).change_context(IngestionError::Model)?; + + if self.chain_builder.segment_size() == self.options.chain_segment_size + self.options.chain_segment_upload_offset_size + { + let segment = self.chain_builder.take_segment(self.options.chain_segment_size).change_context(IngestionError::Model)?; + info!(first_block = %segment.info.first_block, "uploading chain segment"); + self.chain_store.put(&segment).await.change_context(IngestionError::CanonicalChainStoreRequest)?; + + should_upload_recent_segment = true; + } + + if should_upload_recent_segment { + let current_segment = self.chain_builder.current_segment().change_context(IngestionError::Model)?; + info!(first_block = %current_segment.info.first_block, last_block = %current_segment.info.last_block, "uploading recent chain segment"); + let recent_etag = self.chain_store.put_recent(¤t_segment).await.change_context(IngestionError::CanonicalChainStoreRequest)?; + self.state_client.put_ingested(recent_etag).await.change_context(IngestionError::StateClientRequest)?; + } + } + + let mut block_number = state.queued_block_number; + + while self.can_push_task() { + if block_number + 1 > state.head.number { + break; + } + + block_number += 1; + trace!(block_number, "pushing finalized ingestion task"); + 
self.push_ingest_block_by_number(block_number); + } + + Ok(IngestionState::Ingest(IngestState { + queued_block_number: block_number, + ..state + })) + } + } + } + + fn can_push_task(&self) -> bool { + self.task_queue.len() < self.options.max_concurrent_tasks + } + + fn push_ingest_block_by_number(&mut self, block_number: u64) { + let ingestion = self.ingestion.clone(); + self.task_queue.push_back(tokio::spawn(async move { + ingestion.ingest_block_by_number(block_number).await + })); + } + + async fn get_starting_cursor(&mut self) -> Result { + let existing_chain_segment = self + .chain_store + .get_recent(None) + .await + .change_context(IngestionError::CanonicalChainStoreRequest) + .attach_printable("failed to get recent canonical chain segment")?; + + if let Some(existing_chain_segment) = existing_chain_segment { + info!("restoring canonical chain"); + self.chain_builder = + CanonicalChainBuilder::restore_from_segment(existing_chain_segment) + .change_context(IngestionError::Model) + .attach_printable("failed to restore canonical chain from recent segment")?; + let info = self.chain_builder.info().ok_or(IngestionError::Model)?; + + info!(first_block = %info.first_block, last_block = %info.last_block, "ingestion state restored"); + + let block_info = self + .ingestion + .get_block_info_by_number(info.last_block.number) + .await?; + + if info.last_block != block_info.cursor() { + return Err(IngestionError::Model) + .attach_printable("last block in chain does not match last block in state") + .attach_printable("offline reorg not handled yet") + .attach_printable_lazy(|| format!("last block in state: {}", info.last_block)) + .attach_printable_lazy(|| format!("last block: {}", block_info.cursor())); + } + + Ok(IngestionStartAction::Resume(block_info.cursor())) + } else { + let starting_block = self.options.override_starting_block.unwrap_or(0); + + self.state_client + .put_starting_block(starting_block) + .await + .change_context(IngestionError::StateClientRequest)?; + 
+ Ok(IngestionStartAction::Start(starting_block)) + } + } +} + +impl IngestionInner +where + I: BlockIngestion + Send + Sync + 'static, +{ + #[tracing::instrument("ingestion_ingest_block", skip(self), err(Debug))] + async fn ingest_block_by_number(&self, block_number: u64) -> Result { + let ingestion = self.ingestion.clone(); + let store = self.block_store.clone(); + let (block_info, block) = ingestion + .ingest_block_by_number(block_number) + .await + .change_context(IngestionError::RpcRequest) + .attach_printable("failed to ingest block") + .attach_printable_lazy(|| format!("block number: {}", block_number))?; + + if block.index.len() != block.body.len() { + return Err(IngestionError::Model) + .attach_printable("block indexes and body fragments do not match") + .attach_printable_lazy(|| format!("block number: {}", block_number)) + .attach_printable_lazy(|| format!("indexes len: {}", block.index.len())) + .attach_printable_lazy(|| format!("body len: {}", block.body.len())); + } + + let block_cursor = block_info.cursor(); + debug!(cursor = %block_cursor, "uploading block"); + + store + .put_block(&block_cursor, &block) + .await + .change_context(IngestionError::BlockStoreRequest)?; + + Ok(block_info) + } + + async fn get_head_cursor(&self) -> Result { + self.ingestion.get_head_cursor().await + } + + async fn get_finalized_cursor(&self) -> Result { + self.ingestion.get_finalized_cursor().await + } + + async fn get_block_info_by_number( + &self, + block_number: u64, + ) -> Result { + self.ingestion.get_block_info_by_number(block_number).await + } +} + +impl Default for IngestionServiceOptions { + fn default() -> Self { + Self { + max_concurrent_tasks: 100, + chain_segment_size: 10_000, + chain_segment_upload_offset_size: 100, + override_starting_block: None, + head_refresh_interval: Duration::from_secs(3), + finalized_refresh_interval: Duration::from_secs(30), + } + } +} + +impl IngestionState { + pub fn state_name(&self) -> &'static str { + match self { + 
IngestionState::Recover => "recover", + IngestionState::Ingest(_) => "ingest", + } + } +} diff --git a/common/src/ingestion/state_client.rs b/common/src/ingestion/state_client.rs new file mode 100644 index 00000000..e54ed81c --- /dev/null +++ b/common/src/ingestion/state_client.rs @@ -0,0 +1,312 @@ +use apibara_etcd::{EtcdClient, KvClient, WatchClient}; +use error_stack::{Result, ResultExt}; +use futures::{Stream, StreamExt}; +use tokio_util::sync::CancellationToken; + +use crate::object_store::ObjectETag; + +pub static INGESTION_PREFIX_KEY: &str = "ingestion/"; +pub static INGESTED_KEY: &str = "ingestion/ingested"; +pub static STARTING_BLOCK_KEY: &str = "ingestion/starting_block"; +pub static FINALIZED_KEY: &str = "ingestion/finalized"; +pub static SEGMENTED_KEY: &str = "ingestion/segmented"; +pub static GROUPED_KEY: &str = "ingestion/grouped"; + +#[derive(Debug)] +pub struct IngestionStateClientError; + +#[derive(Clone)] +pub struct IngestionStateClient { + kv_client: KvClient, + watch_client: WatchClient, +} + +#[derive(Clone, Debug)] +pub enum IngestionStateUpdate { + StartingBlock(u64), + Finalized(u64), + Segmented(u64), + Grouped(u64), + Ingested(String), +} + +impl IngestionStateClient { + pub fn new(client: &EtcdClient) -> Self { + let kv_client = client.kv_client(); + let watch_client = client.watch_client(); + + Self { + kv_client, + watch_client, + } + } + + pub async fn watch_changes( + &mut self, + ct: CancellationToken, + ) -> Result< + impl Stream>, + IngestionStateClientError, + > { + let (_watcher, stream) = self + .watch_client + .watch_prefix(INGESTION_PREFIX_KEY, ct) + .await + .change_context(IngestionStateClientError) + .attach_printable("failed to watch ingestion state")?; + + let changes = stream.flat_map(|response| { + let response = match response { + Err(err) => { + return futures::stream::iter(vec![ + Err(err).change_context(IngestionStateClientError) + ]); + } + Ok(response) => response, + }; + + let changes = response + .events() + 
.iter() + .filter_map(|event| { + let kv = event.kv()?; + + match IngestionStateUpdate::from_kv(kv) { + Ok(Some(update)) => Some(Ok(update)), + Ok(None) => None, + Err(err) => Some(Err(err)), + } + }) + .collect::>>(); + futures::stream::iter(changes) + }); + + Ok(changes) + } + + pub async fn get_starting_block(&mut self) -> Result, IngestionStateClientError> { + let response = self + .kv_client + .get(STARTING_BLOCK_KEY) + .await + .change_context(IngestionStateClientError) + .attach_printable("failed to get starting block")?; + + let Some(kv) = response.kvs().first() else { + return Ok(None); + }; + + let value = String::from_utf8(kv.value().to_vec()) + .change_context(IngestionStateClientError) + .attach_printable("failed to decode starting block")?; + + let block = value + .parse::() + .change_context(IngestionStateClientError) + .attach_printable("failed to parse starting block")?; + + Ok(Some(block)) + } + + pub async fn put_starting_block( + &mut self, + block: u64, + ) -> Result<(), IngestionStateClientError> { + let value = block.to_string(); + self.kv_client + .put(STARTING_BLOCK_KEY, value.as_bytes()) + .await + .change_context(IngestionStateClientError) + .attach_printable("failed to put starting block")?; + + Ok(()) + } + + pub async fn get_finalized(&mut self) -> Result, IngestionStateClientError> { + let response = self + .kv_client + .get(FINALIZED_KEY) + .await + .change_context(IngestionStateClientError) + .attach_printable("failed to get finalized block")?; + + let Some(kv) = response.kvs().first() else { + return Ok(None); + }; + + let value = String::from_utf8(kv.value().to_vec()) + .change_context(IngestionStateClientError) + .attach_printable("failed to decode finalized block")?; + + let block = value + .parse::() + .change_context(IngestionStateClientError) + .attach_printable("failed to parse finalized block")?; + + Ok(Some(block)) + } + + pub async fn put_finalized(&mut self, block: u64) -> Result<(), IngestionStateClientError> { + let 
value = block.to_string(); + self.kv_client + .put(FINALIZED_KEY, value.as_bytes()) + .await + .change_context(IngestionStateClientError) + .attach_printable("failed to put finalized block")?; + + Ok(()) + } + + pub async fn get_ingested(&mut self) -> Result, IngestionStateClientError> { + let response = self + .kv_client + .get(INGESTED_KEY) + .await + .change_context(IngestionStateClientError) + .attach_printable("failed to get latest ingested block")?; + + let Some(kv) = response.kvs().first() else { + return Ok(None); + }; + + let etag = String::from_utf8(kv.value().to_vec()) + .change_context(IngestionStateClientError) + .attach_printable("failed to decode etag")?; + + Ok(Some(ObjectETag(etag))) + } + + pub async fn put_ingested( + &mut self, + etag: ObjectETag, + ) -> Result<(), IngestionStateClientError> { + let value = etag.0; + self.kv_client + .put(INGESTED_KEY, value.as_bytes()) + .await + .change_context(IngestionStateClientError) + .attach_printable("failed to put latest ingested block")?; + + Ok(()) + } + + pub async fn get_segmented(&mut self) -> Result, IngestionStateClientError> { + let response = self + .kv_client + .get(SEGMENTED_KEY) + .await + .change_context(IngestionStateClientError) + .attach_printable("failed to get segmented block")?; + + let Some(kv) = response.kvs().first() else { + return Ok(None); + }; + + let value = String::from_utf8(kv.value().to_vec()) + .change_context(IngestionStateClientError) + .attach_printable("failed to decode segmented block")?; + + let block = value + .parse::() + .change_context(IngestionStateClientError) + .attach_printable("failed to parse segmented block")?; + + Ok(Some(block)) + } + + pub async fn put_segmented(&mut self, block: u64) -> Result<(), IngestionStateClientError> { + let value = block.to_string(); + self.kv_client + .put(SEGMENTED_KEY, value.as_bytes()) + .await + .change_context(IngestionStateClientError) + .attach_printable("failed to put segmented block")?; + + Ok(()) + } + + pub async 
fn get_grouped(&mut self) -> Result, IngestionStateClientError> { + let response = self + .kv_client + .get(GROUPED_KEY) + .await + .change_context(IngestionStateClientError) + .attach_printable("failed to get grouped block")?; + + let Some(kv) = response.kvs().first() else { + return Ok(None); + }; + + let value = String::from_utf8(kv.value().to_vec()) + .change_context(IngestionStateClientError) + .attach_printable("failed to decode grouped block")?; + + let block = value + .parse::() + .change_context(IngestionStateClientError) + .attach_printable("failed to parse grouped block")?; + + Ok(Some(block)) + } + + pub async fn put_grouped(&mut self, block: u64) -> Result<(), IngestionStateClientError> { + let value = block.to_string(); + self.kv_client + .put(GROUPED_KEY, value.as_bytes()) + .await + .change_context(IngestionStateClientError) + .attach_printable("failed to put grouped block")?; + + Ok(()) + } +} + +impl error_stack::Context for IngestionStateClientError {} + +impl std::fmt::Display for IngestionStateClientError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "ingestion state client error") + } +} + +impl IngestionStateUpdate { + pub fn from_kv(kv: &etcd_client::KeyValue) -> Result, IngestionStateClientError> { + let key = String::from_utf8(kv.key().to_vec()) + .change_context(IngestionStateClientError) + .attach_printable("failed to decode key")?; + + let value = String::from_utf8(kv.value().to_vec()) + .change_context(IngestionStateClientError) + .attach_printable("failed to decode value")?; + + if key.ends_with(STARTING_BLOCK_KEY) { + let block = value + .parse::() + .change_context(IngestionStateClientError) + .attach_printable("failed to parse starting block")?; + Ok(Some(IngestionStateUpdate::StartingBlock(block))) + } else if key.ends_with(FINALIZED_KEY) { + let block = value + .parse::() + .change_context(IngestionStateClientError) + .attach_printable("failed to parse finalized block")?; + 
Ok(Some(IngestionStateUpdate::Finalized(block))) + } else if key.ends_with(INGESTED_KEY) { + Ok(Some(IngestionStateUpdate::Ingested(value))) + } else if key.ends_with(SEGMENTED_KEY) { + let block = value + .parse::() + .change_context(IngestionStateClientError) + .attach_printable("failed to parse segmented block")?; + Ok(Some(IngestionStateUpdate::Segmented(block))) + } else if key.ends_with(GROUPED_KEY) { + let block = value + .parse::() + .change_context(IngestionStateClientError) + .attach_printable("failed to parse grouped block")?; + Ok(Some(IngestionStateUpdate::Grouped(block))) + } else { + Ok(None) + } + } +} diff --git a/common/src/join.rs b/common/src/join.rs new file mode 100644 index 00000000..39203aaf --- /dev/null +++ b/common/src/join.rs @@ -0,0 +1,109 @@ +use std::{collections::BTreeMap, ops::RangeBounds}; + +use rkyv::{Archive, Deserialize, Serialize}; +use roaring::RoaringBitmap; + +#[derive(Debug, Default, Archive, Serialize, Deserialize)] +pub struct JoinToOneIndex { + pub keys: Vec, + pub values: Vec, +} + +#[derive(Debug, Default, Archive, Serialize, Deserialize)] +pub struct JoinToManyIndex { + pub keys: Vec, + pub values: Vec>, +} + +/// Index to join one fragment to another. 
+#[derive(Debug, Archive, Serialize, Deserialize)] +pub enum JoinTo { + One(JoinToOneIndex), + Many(JoinToManyIndex), +} + +#[derive(Debug, Default)] +pub struct JoinToOneIndexBuilder(BTreeMap); + +impl JoinToOneIndexBuilder { + pub fn insert(&mut self, key: u32, value: u32) { + self.0.insert(key, value); + } + + pub fn build(self) -> JoinToOneIndex { + self.0 + .iter() + .fold(JoinToOneIndex::default(), |mut index, (key, value)| { + index.keys.push(*key); + index.values.push(*value); + index + }) + } +} + +#[derive(Debug, Default)] +pub struct JoinToManyIndexBuilder(BTreeMap); + +impl JoinToManyIndexBuilder { + pub fn insert(&mut self, key: u32, value: u32) { + self.0.entry(key).or_default().insert(value); + } + + pub fn insert_range(&mut self, key: u32, range: R) + where + R: RangeBounds, + { + self.0.entry(key).or_default().insert_range(range); + } + + pub fn build(self) -> std::io::Result { + self.0 + .iter() + .try_fold(JoinToManyIndex::default(), |mut index, (key, value)| { + index.keys.push(*key); + let mut out = Vec::new(); + value.serialize_into(&mut out)?; + index.values.push(out); + + Ok(index) + }) + } +} + +impl ArchivedJoinToOneIndex { + pub fn get(&self, key: &u32) -> Option { + let pos = self + .keys + .binary_search_by(|entry| entry.to_native().cmp(key)) + .ok()?; + + let value = &self.values[pos]; + Some(value.to_native()) + } +} + +impl ArchivedJoinToManyIndex { + pub fn get(&self, key: &u32) -> Option { + let pos = self + .keys + .binary_search_by(|entry| entry.to_native().cmp(key)) + .ok()?; + + let value = &self.values[pos]; + RoaringBitmap::deserialize_unchecked_from(value.as_slice()) + .expect("failed to deserialize bitmap") + .into() + } +} + +impl From for JoinTo { + fn from(value: JoinToOneIndex) -> Self { + JoinTo::One(value) + } +} + +impl From for JoinTo { + fn from(value: JoinToManyIndex) -> Self { + JoinTo::Many(value) + } +} diff --git a/common/src/lib.rs b/common/src/lib.rs new file mode 100644 index 00000000..2cd991d6 --- 
/dev/null +++ b/common/src/lib.rs @@ -0,0 +1,235 @@ +pub mod block_store; +pub mod chain; +pub mod chain_store; +pub mod chain_view; +pub mod cli; +pub mod compaction; +mod core; +pub mod data_stream; +pub mod dbg; +pub mod file_cache; +pub mod fragment; +pub mod index; +pub mod ingestion; +pub mod join; +pub mod object_store; +pub mod options_store; +pub mod query; +pub mod rkyv; +pub mod segment; +pub mod server; + +pub use apibara_etcd as etcd; +use data_stream::BlockFilterFactory; +use fragment::FragmentInfo; +use ingestion::BlockIngestion; + +pub use self::core::{testing::new_test_cursor, Cursor, GetCursor, Hash}; + +pub use self::cli::StartArgs; + +pub trait ChainSupport { + type BlockIngestion: BlockIngestion + Send + Sync + 'static; + type BlockFilterFactory: BlockFilterFactory + Send + Sync + 'static; + + /// Returns the fragments generated by the chain. + fn fragment_info(&self) -> Vec; + + /// Returns the block ingestion service. + fn block_ingestion(&self) -> Self::BlockIngestion; + + /// Returns the block filter factory. 
+ fn block_filter_factory(&self) -> Self::BlockFilterFactory; +} + +pub use self::server_impl::{run_server, ServerError}; + +mod server_impl { + use std::collections::HashMap; + + use crate::{ + block_store::BlockStoreReader, chain_view::chain_view_sync_loop, + compaction::compaction_service_loop, fragment, ingestion::ingestion_service_loop, + server::server_loop, ChainSupport, StartArgs, + }; + use error_stack::ResultExt; + use tokio_util::sync::CancellationToken; + use tracing::info; + + #[derive(Debug)] + pub struct ServerError; + + pub async fn run_server( + chain_support: CS, + args: StartArgs, + ct: CancellationToken, + ) -> error_stack::Result<(), ServerError> + where + CS: ChainSupport, + { + let object_store = args.object_store.into_object_store_client().await; + let mut etcd_client = args + .etcd + .into_etcd_client() + .await + .change_context(ServerError)?; + + let status_response = etcd_client.status().await.change_context(ServerError)?; + + info!( + version = status_response.version(), + "connected to etcd cluster" + ); + + let file_cache = args + .cache + .to_file_cache() + .await + .change_context(ServerError)?; + + let ingestion_options = args.ingestion.to_ingestion_service_options(); + + let ingestion_handle = if args.ingestion.ingestion_enabled { + let ingestion = chain_support.block_ingestion(); + tokio::spawn(ingestion_service_loop( + ingestion, + etcd_client.clone(), + object_store.clone(), + file_cache.clone(), + ingestion_options, + ct.clone(), + )) + } else { + tokio::spawn({ + let ct = ct.clone(); + async move { + ct.cancelled().await; + Ok(()) + } + }) + }; + + let (chain_view, chain_view_sync) = chain_view_sync_loop( + file_cache.clone(), + etcd_client.clone(), + object_store.clone(), + ) + .await + .change_context(ServerError) + .attach_printable("failed to start chain view sync service")?; + + let sync_handle = tokio::spawn(chain_view_sync.start(ct.clone())); + + let compaction_handle = if args.compaction.compaction_enabled { + let 
options = args.compaction.to_compaction_options(); + + tokio::spawn(compaction_service_loop( + etcd_client.clone(), + object_store.clone(), + chain_view.clone(), + file_cache.clone(), + options, + ct.clone(), + )) + } else { + tokio::spawn({ + let ct = ct.clone(); + async move { + ct.cancelled().await; + Ok(()) + } + }) + }; + + let block_filter_factory = chain_support.block_filter_factory(); + let fragment_id_to_name = { + let mut fragment_id_to_name = HashMap::from([ + ( + fragment::HEADER_FRAGMENT_ID, + fragment::HEADER_FRAGMENT_NAME.to_string(), + ), + ( + fragment::INDEX_FRAGMENT_ID, + fragment::INDEX_FRAGMENT_NAME.to_string(), + ), + ( + fragment::JOIN_FRAGMENT_ID, + fragment::JOIN_FRAGMENT_NAME.to_string(), + ), + ]); + + for fragment_info in chain_support.fragment_info() { + if let Some(existing) = fragment_id_to_name + .insert(fragment_info.fragment_id, fragment_info.name.clone()) + { + return Err(ServerError) + .attach_printable("duplicate fragment id") + .attach_printable_lazy(|| { + format!("fragment id: {}", fragment_info.fragment_id) + }) + .attach_printable_lazy(|| format!("existing fragment name: {}", existing)) + .attach_printable_lazy(|| { + format!("new fragment name: {}", fragment_info.name) + }); + } + } + + fragment_id_to_name + }; + + let block_store = BlockStoreReader::new(object_store.clone(), file_cache.clone()); + + let server_handle = if args.server.server_enabled { + let options = args + .server + .to_server_options() + .change_context(ServerError)?; + + tokio::spawn(server_loop( + block_filter_factory, + chain_view, + fragment_id_to_name, + block_store, + options, + ct, + )) + } else { + tokio::spawn({ + let ct = ct.clone(); + async move { + ct.cancelled().await; + Ok(()) + } + }) + }; + + tokio::select! 
{ + ingestion = ingestion_handle => { + info!("ingestion loop terminated"); + ingestion.change_context(ServerError)?.change_context(ServerError)?; + } + compaction = compaction_handle => { + info!("compaction loop terminated"); + compaction.change_context(ServerError)?.change_context(ServerError)?; + } + sync = sync_handle => { + info!("sync loop terminated"); + sync.change_context(ServerError)?.change_context(ServerError)?; + } + server = server_handle => { + info!("server terminated"); + server.change_context(ServerError)?.change_context(ServerError)?; + } + } + + Ok(()) + } + + impl error_stack::Context for ServerError {} + + impl std::fmt::Display for ServerError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "DNA server error") + } + } +} diff --git a/common/src/object_store.rs b/common/src/object_store.rs new file mode 100644 index 00000000..bb9c1ddb --- /dev/null +++ b/common/src/object_store.rs @@ -0,0 +1,390 @@ +use apibara_etcd::normalize_prefix; +use aws_sdk_s3::{config::http::HttpResponse, error::SdkError}; +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use error_stack::{Report, Result, ResultExt}; +use tracing::debug; + +#[derive(Debug)] +pub enum ObjectStoreError { + /// Precondition failed. + Precondition, + /// Not modified. + NotModified, + /// Not found. + NotFound, + /// Request error. + Request, + /// Metadata is missing. + Metadata, + /// Checksum mismatch. + ChecksumMismatch, +} + +/// Options for the object store. +#[derive(Default, Clone, Debug)] +pub struct ObjectStoreOptions { + /// The S3 bucket to use. + pub bucket: String, + /// Under which prefix to store the data. + pub prefix: Option, +} + +/// This is an opinionated object store client. 
+#[derive(Clone)] +pub struct ObjectStore { + client: aws_sdk_s3::Client, + prefix: String, + bucket: String, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ObjectETag(pub String); + +#[derive(Default, Clone, Debug)] +pub struct GetOptions { + /// If the object exists, check that the ETag matches. + pub etag: Option, +} + +/// How to put an object. +#[derive(Clone, Debug)] +pub enum PutMode { + /// Overwrite the object if it exists. + Overwrite, + /// Create the object only if it doesn't exist. + Create, + /// Update the object only if it exists and the ETag matches. + Update(ObjectETag), +} + +#[derive(Default, Clone, Debug)] +pub struct PutOptions { + pub mode: PutMode, +} + +#[derive(Default, Clone, Debug)] +pub struct DeleteOptions {} + +#[derive(Debug)] +pub struct GetResult { + pub body: Bytes, + pub etag: ObjectETag, +} + +#[derive(Debug)] +pub struct PutResult { + pub etag: ObjectETag, +} + +#[derive(Debug)] +pub struct DeleteResult; + +impl ObjectStore { + pub fn new(config: aws_config::SdkConfig, options: ObjectStoreOptions) -> Self { + Self::new_from_config((&config).into(), options) + } + + pub fn new_from_config(config: aws_sdk_s3::Config, options: ObjectStoreOptions) -> Self { + let client = aws_sdk_s3::Client::from_conf(config); + + let prefix = normalize_prefix(options.prefix); + + Self { + client, + bucket: options.bucket, + prefix, + } + } + + pub async fn new_from_env(options: ObjectStoreOptions) -> Self { + let config = aws_config::load_from_env().await; + Self::new(config, options) + } + + /// Ensure the currently configured bucket exists. 
+ pub async fn ensure_bucket(&self) -> Result<(), ObjectStoreError> { + self.client + .create_bucket() + .bucket(&self.bucket) + .send() + .await + .change_to_object_store_context() + .attach_printable("failed to create bucket") + .attach_printable_lazy(|| format!("bucket name: {}", self.bucket))?; + Ok(()) + } + + #[tracing::instrument(name = "object_store_get", skip(self, options))] + pub async fn get( + &self, + path: &str, + options: GetOptions, + ) -> Result { + let key = self.full_key(path); + let response = self + .client + .get_object() + .bucket(&self.bucket) + .key(&key) + .customize() + .mutate_request(move |request| { + if let Some(etag) = &options.etag { + request.headers_mut().insert("If-Match", etag.0.clone()); + } + }) + .send() + .await + .change_to_object_store_context() + .attach_printable("failed to get object") + .attach_printable_lazy(|| format!("key: {key}"))?; + + let etag = response + .e_tag + .ok_or(ObjectStoreError::Metadata) + .attach_printable("missing etag")? 
+ .into(); + + let body = response + .body + .collect() + .await + .change_context(ObjectStoreError::Request) + .attach_printable("failed to read object body")?; + + let decompressed = BytesMut::with_capacity(body.remaining()); + let mut writer = decompressed.writer(); + zstd::stream::copy_decode(&mut body.reader(), &mut writer) + .change_context(ObjectStoreError::Request)?; + let decompressed = writer.into_inner(); + + let checksum = decompressed[decompressed.len() - 4..].as_ref().get_u32(); + let data = decompressed[..decompressed.len() - 4].as_ref(); + + if crc32fast::hash(data) != checksum { + return Err(ObjectStoreError::ChecksumMismatch) + .attach_printable("checksum mismatch") + .attach_printable_lazy(|| format!("key: {key}")); + } + + let body = Bytes::copy_from_slice(data); + + Ok(GetResult { body, etag }) + } + + #[tracing::instrument(name = "object_store_put", skip_all, fields(key, compression_ratio))] + pub async fn put( + &self, + path: &str, + body: Bytes, + options: PutOptions, + ) -> Result { + let current_span = tracing::Span::current(); + + let key = self.full_key(path); + let size_before = body.len(); + + let checksum = crc32fast::hash(&body); + let mut body = BytesMut::from(body); + body.put_u32(checksum); + + let mut compressed = BytesMut::with_capacity(body.len()).writer(); + zstd::stream::copy_encode(body.reader(), &mut compressed, 0) + .change_context(ObjectStoreError::Request)?; + let compressed = compressed.into_inner(); + + let size_after = compressed.len(); + let compression_ratio = size_before as f64 / size_after as f64; + + current_span.record("key", &key); + current_span.record("compression_ratio", compression_ratio); + debug!(compression_ratio, key, "compressed object"); + + let response = self + .client + .put_object() + .bucket(&self.bucket) + .key(&key) + .body(compressed.freeze().into()) + .customize() + .mutate_request(move |request| match &options.mode { + PutMode::Overwrite => {} + PutMode::Create => { + // If-None-Match: "*" 
seems to be better supported than If-Match: "". + request.headers_mut().insert("If-None-Match", "*"); + } + PutMode::Update(etag) => { + request.headers_mut().insert("If-Match", etag.0.clone()); + } + }) + .send() + .await + .change_to_object_store_context() + .attach_printable("failed to put object") + .attach_printable_lazy(|| format!("key: {key}"))?; + + let etag = response + .e_tag + .ok_or(ObjectStoreError::Metadata) + .attach_printable("missing etag")? + .into(); + + Ok(PutResult { etag }) + } + + #[tracing::instrument(name = "object_store_delete", skip(self, _options))] + pub async fn delete( + &self, + path: &str, + _options: DeleteOptions, + ) -> Result { + let key = self.full_key(path); + self.client + .delete_object() + .bucket(&self.bucket) + .key(&key) + .send() + .await + .change_to_object_store_context() + .attach_printable("failed to delete object") + .attach_printable_lazy(|| format!("key: {key}"))?; + + Ok(DeleteResult) + } + + fn full_key(&self, path: &str) -> String { + format!("{}{}", self.prefix, path) + } +} + +impl error_stack::Context for ObjectStoreError {} + +impl std::fmt::Display for ObjectStoreError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ObjectStoreError::Precondition => write!(f, "object store: precondition failed"), + ObjectStoreError::NotModified => write!(f, "object store: not modified"), + ObjectStoreError::NotFound => write!(f, "object store: not found"), + ObjectStoreError::Request => write!(f, "object store: request error"), + ObjectStoreError::Metadata => write!(f, "object store: metadata is missing or invalid"), + ObjectStoreError::ChecksumMismatch => write!(f, "object store: checksum mismatch"), + } + } +} + +pub trait ObjectStoreResultExt { + fn is_precondition(&self) -> bool; + fn is_not_modified(&self) -> bool; + fn is_not_found(&self) -> bool; +} + +impl ObjectStoreResultExt for Report { + fn is_precondition(&self) -> bool { + matches!(self.current_context(), 
ObjectStoreError::Precondition) + } + + fn is_not_modified(&self) -> bool { + matches!(self.current_context(), ObjectStoreError::NotModified) + } + + fn is_not_found(&self) -> bool { + matches!(self.current_context(), ObjectStoreError::NotFound) + } +} + +impl From for ObjectETag { + fn from(value: String) -> Self { + Self(value) + } +} + +impl Default for PutMode { + fn default() -> Self { + Self::Overwrite + } +} + +trait ToObjectStoreResult: Sized { + type Ok; + + fn change_to_object_store_context(self) -> Result; +} + +impl ToObjectStoreResult for std::result::Result> +where + SdkError: error_stack::Context, +{ + type Ok = T; + + fn change_to_object_store_context(self) -> Result { + match self { + Ok(value) => Ok(value), + Err(err) => match err.raw_response().map(|r| r.status().as_u16()) { + Some(412) => Err(err).change_context(ObjectStoreError::Precondition), + Some(304) => Err(err).change_context(ObjectStoreError::NotModified), + Some(404) => Err(err).change_context(ObjectStoreError::NotFound), + _ => Err(err).change_context(ObjectStoreError::Request), + }, + } + } +} + +pub mod testing { + use aws_config::{meta::region::RegionProviderChain, BehaviorVersion}; + use aws_sdk_s3::config::Credentials; + use futures::Future; + use testcontainers::{core::WaitFor, ContainerAsync, Image}; + + pub struct MinIO; + + pub trait MinIOExt { + fn s3_config(&self) -> impl Future + Send; + } + + impl Image for MinIO { + fn name(&self) -> &str { + "minio/minio" + } + + fn tag(&self) -> &str { + "latest" + } + + fn ready_conditions(&self) -> Vec { + Vec::default() + } + + fn cmd(&self) -> impl IntoIterator>> { + vec!["server", "/data"] + } + } + + pub fn minio_container() -> MinIO { + MinIO + } + + impl MinIOExt for ContainerAsync { + async fn s3_config(&self) -> aws_sdk_s3::Config { + let port = self + .get_host_port_ipv4(9000) + .await + .expect("MinIO port 9000"); + s3_config_at_port(port).await + } + } + + pub async fn s3_config_at_port(port: u16) -> aws_sdk_s3::Config { + 
let endpoint = format!("http://localhost:{}", port); + let region_provider = RegionProviderChain::default_provider().or_else("us-east-1"); + let credentials = Credentials::new("minioadmin", "minioadmin", None, None, "test"); + + let config = aws_config::defaults(BehaviorVersion::latest()) + .region(region_provider) + .endpoint_url(endpoint) + .credentials_provider(credentials) + .load() + .await; + + let config: aws_sdk_s3::Config = (&config).into(); + config.to_builder().force_path_style(true).build() + } +} diff --git a/common/src/options_store.rs b/common/src/options_store.rs new file mode 100644 index 00000000..8a7070a8 --- /dev/null +++ b/common/src/options_store.rs @@ -0,0 +1,102 @@ +use apibara_etcd::{EtcdClient, KvClient}; +use error_stack::{Result, ResultExt}; + +pub static OPTIONS_PREFIX_KEY: &str = "options/"; +pub static CHAIN_SEGMENT_SIZE_KEY: &str = "options/chain_segment_size"; +pub static SEGMENT_SIZE_KEY: &str = "options/segment_size"; +pub static GROUP_SIZE_KEY: &str = "options/group_size"; + +#[derive(Debug)] +pub struct OptionsStoreError; + +/// A client to get and set DNA options. 
+pub struct OptionsStore { + client: KvClient, +} + +impl OptionsStore { + pub fn new(client: &EtcdClient) -> Self { + let client = client.kv_client(); + Self { client } + } + + pub async fn set_chain_segment_size(&mut self, size: usize) -> Result<(), OptionsStoreError> { + self.set_usize(CHAIN_SEGMENT_SIZE_KEY, size) + .await + .attach_printable("failed to set chain segment size") + } + + pub async fn set_segment_size(&mut self, size: usize) -> Result<(), OptionsStoreError> { + self.set_usize(SEGMENT_SIZE_KEY, size) + .await + .attach_printable("failed to set segment size") + } + + pub async fn set_group_size(&mut self, size: usize) -> Result<(), OptionsStoreError> { + self.set_usize(GROUP_SIZE_KEY, size) + .await + .attach_printable("failed to set group size") + } + + pub async fn get_chain_segment_size(&mut self) -> Result, OptionsStoreError> { + self.get_usize(CHAIN_SEGMENT_SIZE_KEY) + .await + .attach_printable("failed to get chain segment size") + } + + pub async fn get_segment_size(&mut self) -> Result, OptionsStoreError> { + self.get_usize(SEGMENT_SIZE_KEY) + .await + .attach_printable("failed to get segment size") + } + + pub async fn get_group_size(&mut self) -> Result, OptionsStoreError> { + self.get_usize(GROUP_SIZE_KEY) + .await + .attach_printable("failed to get group size") + } + + async fn set_usize(&mut self, key: &str, size: usize) -> Result<(), OptionsStoreError> { + let size = size.to_string(); + self.client + .put(key, size.as_bytes()) + .await + .change_context(OptionsStoreError) + .attach_printable("failed to set size")?; + + Ok(()) + } + + async fn get_usize(&mut self, key: &str) -> Result, OptionsStoreError> { + let response = self + .client + .get(key) + .await + .change_context(OptionsStoreError) + .attach_printable("failed to get size options")?; + + let Some(kv) = response.kvs().first() else { + return Ok(None); + }; + + let size = String::from_utf8(kv.value().to_vec()) + .change_context(OptionsStoreError) + .attach_printable("failed to 
decode size")?; + + let size = size + .parse::() + .change_context(OptionsStoreError) + .attach_printable("failed to parse size") + .attach_printable_lazy(|| format!("size: {}", size))?; + + Ok(size.into()) + } +} + +impl error_stack::Context for OptionsStoreError {} + +impl std::fmt::Display for OptionsStoreError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "options store error") + } +} diff --git a/common/src/query.rs b/common/src/query.rs new file mode 100644 index 00000000..0daac164 --- /dev/null +++ b/common/src/query.rs @@ -0,0 +1,112 @@ +use std::collections::BTreeMap; + +use error_stack::Result; +use roaring::RoaringBitmap; +use tracing::trace; + +use crate::{ + fragment::{ArchivedIndexFragment, FragmentId, IndexId}, + index::{self, ScalarValue}, +}; + +pub type FilterId = u32; + +/// Filter a fragment based on the values from this index. +#[derive(Debug, Clone)] +pub struct Condition { + /// The index to filter on. + pub index_id: IndexId, + /// The value to filter on. + pub key: ScalarValue, +} + +/// A single filter. +#[derive(Debug, Clone)] +pub struct Filter { + /// The filter id. + pub filter_id: FilterId, + /// The fragment to filter. + pub fragment_id: FragmentId, + /// The conditions to filter on. + /// + /// These conditions are logically ANDed together. + pub conditions: Vec, + /// Join results from this filter with the given fragments. + pub joins: Vec, +} + +/// A collection of filters. +#[derive(Debug, Clone, Default)] +pub struct BlockFilter { + pub always_include_header: bool, + filters: BTreeMap>, +} + +impl BlockFilter { + pub fn set_always_include_header(&mut self, value: bool) { + self.always_include_header = value; + } + + /// Add a filter to the block filter. + pub fn add_filter(&mut self, filter: Filter) { + self.filters + .entry(filter.fragment_id) + .or_default() + .push(filter); + } + + /// Returns an iterator over the filters, grouped by fragment. 
+ pub fn iter(&self) -> impl Iterator)> { + self.filters.iter() + } + + pub fn is_empty(&self) -> bool { + self.filters.is_empty() + } + + pub fn len(&self) -> usize { + self.filters.len() + } +} + +#[derive(Debug)] +pub struct FilterError; + +impl Filter { + pub fn filter(&self, indexes: &ArchivedIndexFragment) -> Result { + let range_start = indexes.range_start.to_native(); + let range_len = indexes.range_len.to_native(); + let mut result = RoaringBitmap::from_iter(range_start..(range_start + range_len)); + trace!(starting = ?result, "starting bitmap"); + + for cond in self.conditions.iter() { + let cond_index = indexes + .indexes + .get(cond.index_id as usize) + .ok_or(FilterError)?; + + match &cond_index.index { + index::ArchivedIndex::Bitmap(bitmap) => { + if let Some(bitmap) = bitmap.get(&cond.key) { + result &= bitmap; + trace!(result = ?result, "bitmap match"); + } else { + trace!("no match"); + result.clear(); + break; + } + } + } + } + + Ok(result) + } +} + +impl error_stack::Context for FilterError {} + +impl std::fmt::Display for FilterError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "failed to filter block") + } +} diff --git a/common/src/rkyv.rs b/common/src/rkyv.rs new file mode 100644 index 00000000..51973c62 --- /dev/null +++ b/common/src/rkyv.rs @@ -0,0 +1,64 @@ +//! rkyv helpers. 
+ +use rkyv::{ + api::high::{HighSerializer, HighValidator}, + rancor::Fallible, + ser::{allocator::ArenaHandle, Writer, WriterExt}, + util::AlignedVec, + vec::{ArchivedVec, VecResolver}, + with::{ArchiveWith, DeserializeWith, SerializeWith}, + Archive, Deserialize, Place, Serialize, +}; + +pub trait Serializable<'a>: + rkyv::Serialize, rkyv::rancor::Error>> +{ +} + +pub trait Checked<'a>: rkyv::bytecheck::CheckBytes> {} + +impl<'a, T> Serializable<'a> for T where + T: rkyv::Serialize, rkyv::rancor::Error>> +{ +} + +impl<'a, T> Checked<'a> for T where + T: rkyv::bytecheck::CheckBytes> +{ +} + +/// Wrapper type to align `Vec` fields. +pub struct Aligned; + +impl ArchiveWith> for Aligned { + type Archived = ArchivedVec; + type Resolver = VecResolver; + + fn resolve_with(field: &Vec, resolver: Self::Resolver, out: Place) { + field.resolve(resolver, out); + } +} + +impl SerializeWith, S> for Aligned +where + S: Fallible + Writer + ?Sized, + Vec: Serialize, +{ + fn serialize_with(field: &Vec, serializer: &mut S) -> Result { + serializer.align(N)?; + field.serialize(serializer) + } +} + +impl DeserializeWith, Vec, D> for Aligned +where + D: Fallible + ?Sized, + ArchivedVec: Deserialize, D>, +{ + fn deserialize_with( + field: &ArchivedVec, + deserializer: &mut D, + ) -> Result, D::Error> { + field.deserialize(deserializer) + } +} diff --git a/common/src/segment.rs b/common/src/segment.rs new file mode 100644 index 00000000..ec70e5cb --- /dev/null +++ b/common/src/segment.rs @@ -0,0 +1,35 @@ +//! A segment is a collection of fragments from different blocks. + +use bytes::Bytes; +use rkyv::{Archive, Deserialize, Serialize}; + +use crate::{fragment::IndexGroupFragment, Cursor}; + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct FragmentData { + pub cursor: Cursor, + pub data: T, +} + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct Segment { + /// The first block in the segment. + pub first_block: Cursor, + /// The segment body. 
+ pub data: Vec>, +} + +#[derive(Archive, Serialize, Deserialize, Debug)] +pub struct SegmentGroup { + /// The first block in the segment. + pub first_block: Cursor, + /// The segment body. + pub index: IndexGroupFragment, +} + +#[derive(Debug)] +/// A segment ready to be written to the storage. +pub struct SerializedSegment { + pub name: String, + pub data: Bytes, +} diff --git a/common/src/server/cli.rs b/common/src/server/cli.rs new file mode 100644 index 00000000..1f42b6b0 --- /dev/null +++ b/common/src/server/cli.rs @@ -0,0 +1,49 @@ +use std::net::SocketAddr; + +use clap::Args; +use error_stack::{Result, ResultExt}; + +use crate::server::ServerOptions; + +use super::{error::ServerError, StreamServiceOptions}; + +#[derive(Args, Debug)] +pub struct ServerArgs { + /// Whether to run the DNA server. + #[clap(long = "server.enabled", env = "DNA_SERVER_ENABLED")] + pub server_enabled: bool, + /// The DNA server address. + #[clap( + long = "server.address", + env = "DNA_SERVER_ADDRESS", + default_value = "0.0.0.0:7007" + )] + pub server_address: String, + /// Maximum number of concurrent streams served. 
+ #[clap( + long = "server.max-concurrent-streams", + env = "DNA_SERVER_MAX_CONCURRENT_STREAMS", + default_value = "1000" + )] + pub max_concurrent_streams: usize, +} + +impl ServerArgs { + pub fn to_server_options(&self) -> Result { + let address = self + .server_address + .parse::() + .change_context(ServerError) + .attach_printable("failed to parse server address") + .attach_printable_lazy(|| format!("address: {}", self.server_address))?; + + let stream_service_options = StreamServiceOptions { + max_concurrent_streams: self.max_concurrent_streams, + }; + + Ok(ServerOptions { + address, + stream_service_options, + }) + } +} diff --git a/common/src/server/error.rs b/common/src/server/error.rs new file mode 100644 index 00000000..e2e3fa7d --- /dev/null +++ b/common/src/server/error.rs @@ -0,0 +1,10 @@ +#[derive(Debug)] +pub struct ServerError; + +impl error_stack::Context for ServerError {} + +impl std::fmt::Display for ServerError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "server error") + } +} diff --git a/common/src/server/mod.rs b/common/src/server/mod.rs new file mode 100644 index 00000000..5c19b239 --- /dev/null +++ b/common/src/server/mod.rs @@ -0,0 +1,73 @@ +mod cli; +mod error; +mod service; + +use std::collections::HashMap; +use std::net::SocketAddr; + +use apibara_dna_protocol::dna::stream::dna_stream_file_descriptor_set; +use error::ServerError; +use error_stack::{Result, ResultExt}; +use service::StreamService; +use tokio_util::sync::CancellationToken; +use tonic::transport::Server as TonicServer; +use tracing::info; + +use crate::{ + block_store::BlockStoreReader, chain_view::ChainView, data_stream::BlockFilterFactory, + fragment::FragmentId, +}; + +pub use self::cli::ServerArgs; +pub use self::service::StreamServiceOptions; + +#[derive(Debug, Clone)] +pub struct ServerOptions { + /// The server address. + pub address: SocketAddr, + /// Stream service options. 
+ pub stream_service_options: StreamServiceOptions, +} + +pub async fn server_loop( + filter_factory: BFF, + chain_view: tokio::sync::watch::Receiver>, + fragment_id_to_name: HashMap, + block_store: BlockStoreReader, + options: ServerOptions, + ct: CancellationToken, +) -> Result<(), ServerError> +where + BFF: BlockFilterFactory + Send + Sync + 'static, +{ + let (_health_reporter, health_service) = tonic_health::server::health_reporter(); + + let reflection_service = tonic_reflection::server::Builder::configure() + .register_encoded_file_descriptor_set(dna_stream_file_descriptor_set()) + .register_encoded_file_descriptor_set(tonic_health::pb::FILE_DESCRIPTOR_SET) + .build_v1() + .change_context(ServerError) + .attach_printable("failed to create gRPC reflection service")?; + + let stream_service = StreamService::new( + filter_factory, + chain_view, + fragment_id_to_name, + block_store, + options.stream_service_options, + ct.clone(), + ); + + info!(address = %options.address, "starting DNA server"); + + TonicServer::builder() + .add_service(health_service) + .add_service(reflection_service) + .add_service(stream_service.into_service()) + .serve_with_shutdown(options.address, { + let ct = ct.clone(); + async move { ct.cancelled().await } + }) + .await + .change_context(ServerError) +} diff --git a/common/src/server/service.rs b/common/src/server/service.rs new file mode 100644 index 00000000..edce8342 --- /dev/null +++ b/common/src/server/service.rs @@ -0,0 +1,242 @@ +use std::{collections::HashMap, pin::Pin, sync::Arc, time::Duration}; + +use apibara_dna_protocol::dna::stream::{ + dna_stream_server::{self, DnaStream}, + DataFinality, StatusRequest, StatusResponse, StreamDataRequest, StreamDataResponse, +}; +use error_stack::Result; +use futures::{Future, Stream, StreamExt, TryFutureExt}; +use tokio::sync::{mpsc, Semaphore}; +use tokio_stream::wrappers::ReceiverStream; +use tokio_util::sync::CancellationToken; +use tracing::{error, info}; + +use crate::{ + 
block_store::BlockStoreReader, + chain_view::{CanonicalCursor, ChainView, ChainViewError}, + data_stream::{BlockFilterFactory, DataStream}, + fragment::FragmentId, + Cursor, +}; + +const CHANNEL_SIZE: usize = 1024; + +static STREAM_SEMAPHORE_ACQUIRE_TIMEOUT: Duration = Duration::from_secs(1); + +#[derive(Debug, Clone)] +pub struct StreamServiceOptions { + /// Maximum number of concurrent streams. + pub max_concurrent_streams: usize, +} + +pub struct StreamService +where + BFF: BlockFilterFactory, +{ + filter_factory: BFF, + stream_semaphore: Arc, + chain_view: tokio::sync::watch::Receiver>, + fragment_id_to_name: HashMap, + block_store: BlockStoreReader, + ct: CancellationToken, +} + +impl StreamService +where + BFF: BlockFilterFactory, +{ + pub fn new( + filter_factory: BFF, + chain_view: tokio::sync::watch::Receiver>, + fragment_id_to_name: HashMap, + block_store: BlockStoreReader, + options: StreamServiceOptions, + ct: CancellationToken, + ) -> Self { + let stream_semaphore = Arc::new(Semaphore::new(options.max_concurrent_streams)); + Self { + filter_factory, + stream_semaphore, + chain_view, + fragment_id_to_name, + block_store, + ct, + } + } + + pub fn into_service(self) -> dna_stream_server::DnaStreamServer { + dna_stream_server::DnaStreamServer::new(self) + } +} + +#[tonic::async_trait] +impl DnaStream for StreamService +where + BFF: BlockFilterFactory + Send + Sync + 'static, +{ + type StreamDataStream = Pin< + Box> + Send + 'static>, + >; + + async fn status( + &self, + _request: tonic::Request, + ) -> tonic::Result, tonic::Status> { + let Some(chain_view) = self.chain_view.borrow().clone() else { + return Err(tonic::Status::unavailable("chain view not initialized yet")); + }; + + let response = chain_view.get_status().await.map_err(|err| { + error!(error = ?err, "DnaStream::status error"); + tonic::Status::internal("internal server error") + })?; + + Ok(tonic::Response::new(response)) + } + + async fn stream_data( + &self, + request: tonic::Request, + ) 
-> tonic::Result, tonic::Status> { + let request = request.into_inner(); + info!(request = ?request, "stream data request"); + + let Some(chain_view) = self.chain_view.borrow().clone() else { + return Err(tonic::Status::unavailable("chain view not initialized yet")); + }; + + let permit = match tokio::time::timeout( + STREAM_SEMAPHORE_ACQUIRE_TIMEOUT, + self.stream_semaphore.clone().acquire_owned(), + ) + .await + { + Err(_) => { + return Err(tonic::Status::resource_exhausted("too many streams")); + } + Ok(Err(_)) => return Err(tonic::Status::internal("internal server error")), + Ok(Ok(permit)) => permit, + }; + + // Validate starting cursor by checking it's in range. + // The block could be reorged but that's handled by the `DataStream`. + let starting_cursor = if let Some(cursor) = request.starting_cursor { + let cursor = Cursor::from(cursor); + chain_view.ensure_cursor_in_range(&cursor).await?; + cursor.into() + } else { + None + }; + + let finalized = chain_view + .get_finalized_cursor() + .await + .map_err(|_| tonic::Status::internal("internal server error"))?; + + // Convert finality. + let finality: DataFinality = request + .finality + .map(TryFrom::try_from) + .transpose() + .map_err(|_| tonic::Status::invalid_argument("invalid finality"))? + .unwrap_or(DataFinality::Accepted); + + let heartbeat_interval = request + .heartbeat_interval + .map(TryFrom::try_from) + .transpose() + .map_err(|_| tonic::Status::invalid_argument("invalid heartbeat interval")) + .and_then(validate_heartbeat_interval)?; + + // Parse and validate filter. 
+ let filter = self.filter_factory.create_block_filter(&request.filter)?; + + let ds = DataStream::new( + filter, + starting_cursor, + finalized, + finality, + heartbeat_interval, + chain_view, + self.fragment_id_to_name.clone(), + self.block_store.clone(), + permit, + ); + let (tx, rx) = mpsc::channel(CHANNEL_SIZE); + + tokio::spawn(ds.start(tx, self.ct.clone()).inspect_err(|err| { + error!(error = ?err, "data stream error"); + })); + + let stream = ReceiverStream::new(rx).boxed(); + + Ok(tonic::Response::new(stream)) + } +} + +trait ChainViewExt { + fn get_status(&self) -> impl Future> + Send; + fn ensure_cursor_in_range( + &self, + cursor: &Cursor, + ) -> impl Future> + Send; +} + +impl ChainViewExt for ChainView { + async fn get_status(&self) -> Result { + let starting = self.get_starting_cursor().await?; + let finalized = self.get_finalized_cursor().await?; + let head = self.get_head().await?; + + Ok(StatusResponse { + current_head: None, + last_ingested: Some(head.into()), + finalized: Some(finalized.into()), + starting: Some(starting.into()), + }) + } + + async fn ensure_cursor_in_range(&self, cursor: &Cursor) -> tonic::Result<(), tonic::Status> { + // If the cursor is _after_ the last ingested block, it's out of range because eventually + // it will become available. + match self + .get_canonical(cursor.number) + .await + .map_err(|_| tonic::Status::internal("internal server error"))? 
+ { + CanonicalCursor::AfterAvailable(last) => Err(tonic::Status::out_of_range(format!( + "cursor {} is after the last ingested block {}", + cursor.number, last.number + ))), + CanonicalCursor::BeforeAvailable(first) => { + Err(tonic::Status::invalid_argument(format!( + "cursor {} is before the first ingested block {}", + cursor.number, first.number + ))) + } + CanonicalCursor::Canonical(_) => Ok(()), + } + } +} + +fn validate_heartbeat_interval( + heartbeat_interval: Option, +) -> tonic::Result { + let Some(heartbeat_interval) = heartbeat_interval else { + return Ok(Duration::from_secs(30)); + }; + + if heartbeat_interval < Duration::from_secs(10) { + Err(tonic::Status::invalid_argument(format!( + "heartbeat interval must be at least 10 seconds, got {}", + heartbeat_interval.as_secs() + ))) + } else if heartbeat_interval > Duration::from_secs(60) { + Err(tonic::Status::invalid_argument(format!( + "heartbeat interval must be at most 60 seconds, got {}", + heartbeat_interval.as_secs() + ))) + } else { + Ok(heartbeat_interval) + } +} diff --git a/common/tests/test_object_store.rs b/common/tests/test_object_store.rs new file mode 100644 index 00000000..60ae35b5 --- /dev/null +++ b/common/tests/test_object_store.rs @@ -0,0 +1,281 @@ +use testcontainers::runners::AsyncRunner; + +use apibara_dna_common::object_store::{ + testing::{minio_container, MinIOExt}, + DeleteOptions, GetOptions, ObjectETag, ObjectStore, ObjectStoreOptions, ObjectStoreResultExt, + PutMode, PutOptions, +}; + +#[tokio::test] +async fn test_put_and_get_no_prefix_no_precondition() { + let minio = minio_container().start().await.unwrap(); + let config = minio.s3_config().await; + + let client = ObjectStore::new_from_config( + config, + ObjectStoreOptions { + bucket: "test".to_string(), + ..Default::default() + }, + ); + + client.ensure_bucket().await.unwrap(); + + let put_res = client + .put("test", "Hello, World".into(), PutOptions::default()) + .await + .unwrap(); + + assert_eq!( + put_res.etag, + 
ObjectETag("\"600335e986d6c8ce1e348d20d6d16045\"".to_string()) + ); + + let get_res = client.get("test", GetOptions::default()).await.unwrap(); + assert_eq!(get_res.etag, put_res.etag); + assert_eq!(get_res.body, "Hello, World".as_bytes()); +} + +#[tokio::test] +async fn test_put_and_get_with_prefix_no_precondition() { + let minio = minio_container().start().await.unwrap(); + let config = minio.s3_config().await; + + // Put an object in the bucket with prefix. + // Put an object with the same filename in the bucket without prefix. + // Check that they are indeed different. + let client = ObjectStore::new_from_config( + config.clone(), + ObjectStoreOptions { + bucket: "test".to_string(), + prefix: Some("my-prefix".to_string()), + }, + ); + + client.ensure_bucket().await.unwrap(); + + client + .put("test", "With my-prefix".into(), PutOptions::default()) + .await + .unwrap(); + + { + let client = ObjectStore::new_from_config( + config, + ObjectStoreOptions { + bucket: "test".to_string(), + prefix: None, + }, + ); + client + .put("test", "Without prefix".into(), PutOptions::default()) + .await + .unwrap(); + } + + let get_res = client.get("test", GetOptions::default()).await.unwrap(); + assert_eq!(get_res.body, "With my-prefix".as_bytes()); +} + +#[tokio::test] +async fn test_get_with_etag() { + let minio = minio_container().start().await.unwrap(); + let config = minio.s3_config().await; + + let client = ObjectStore::new_from_config( + config.clone(), + ObjectStoreOptions { + bucket: "test".to_string(), + ..Default::default() + }, + ); + + client.ensure_bucket().await.unwrap(); + + let put_res = client + .put("test", "Hello, World".into(), PutOptions::default()) + .await + .unwrap(); + + client + .get( + "test", + GetOptions { + etag: Some(put_res.etag), + }, + ) + .await + .unwrap(); + + let response = client + .get( + "test", + GetOptions { + etag: Some(ObjectETag("bad etag".to_string())), + }, + ) + .await; + + assert!(response.is_err()); + 
assert!(response.unwrap_err().is_precondition()); +} + +#[tokio::test] +async fn test_put_with_overwrite() { + let minio = minio_container().start().await.unwrap(); + let config = minio.s3_config().await; + + let client = ObjectStore::new_from_config( + config.clone(), + ObjectStoreOptions { + bucket: "test".to_string(), + ..Default::default() + }, + ); + + client.ensure_bucket().await.unwrap(); + + let put_res = client + .put("test", "Hello, World".into(), PutOptions::default()) + .await + .unwrap(); + + let original_etag = put_res.etag; + + let put_res = client + .put( + "test", + "Something else".into(), + PutOptions { + mode: PutMode::Overwrite, + }, + ) + .await + .unwrap(); + + assert_ne!(put_res.etag, original_etag); +} + +#[tokio::test] +async fn test_put_with_create() { + let minio = minio_container().start().await.unwrap(); + let config = minio.s3_config().await; + + let client = ObjectStore::new_from_config( + config.clone(), + ObjectStoreOptions { + bucket: "test".to_string(), + ..Default::default() + }, + ); + + client.ensure_bucket().await.unwrap(); + + client + .put( + "test", + "Hello, World".into(), + PutOptions { + mode: PutMode::Create, + }, + ) + .await + .unwrap(); + + let response = client + .put( + "test", + "Something else".into(), + PutOptions { + mode: PutMode::Create, + }, + ) + .await; + + assert!(response.is_err()); + assert!(response.unwrap_err().is_precondition()); +} + +#[tokio::test] +async fn test_put_with_update() { + let minio = minio_container().start().await.unwrap(); + let config = minio.s3_config().await; + + let client = ObjectStore::new_from_config( + config.clone(), + ObjectStoreOptions { + bucket: "test".to_string(), + ..Default::default() + }, + ); + + client.ensure_bucket().await.unwrap(); + + let response = client + .put( + "test", + "Hello, World".into(), + PutOptions { + mode: PutMode::Create, + }, + ) + .await + .unwrap(); + + let original_etag = response.etag; + + let response = client + .put( + "test", + 
"Something else".into(), + PutOptions { + mode: PutMode::Update("bad etag".to_string().into()), + }, + ) + .await; + assert!(response.is_err()); + assert!(response.unwrap_err().is_precondition()); + + let response = client + .put( + "test", + "Something else".into(), + PutOptions { + mode: PutMode::Update(original_etag.clone()), + }, + ) + .await + .unwrap(); + + assert_ne!(response.etag, original_etag); +} + +#[tokio::test] +async fn test_delete() { + let minio = minio_container().start().await.unwrap(); + let config = minio.s3_config().await; + + let client = ObjectStore::new_from_config( + config.clone(), + ObjectStoreOptions { + bucket: "test".to_string(), + ..Default::default() + }, + ); + + client.ensure_bucket().await.unwrap(); + + client + .put("test", "Hello, World".into(), PutOptions::default()) + .await + .unwrap(); + + client + .delete("test", DeleteOptions::default()) + .await + .unwrap(); + + let response = client.get("test", GetOptions::default()).await; + assert!(response.is_err()); + assert!(response.unwrap_err().is_not_found()); +} diff --git a/core/README.md b/core/README.md deleted file mode 100644 index d00c69a5..00000000 --- a/core/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Apibara Core Protocol - -This crate defines the core types used by Apibara. 
diff --git a/core/build.rs b/core/build.rs deleted file mode 100644 index ff328b0a..00000000 --- a/core/build.rs +++ /dev/null @@ -1,56 +0,0 @@ -use std::{env, path::PathBuf}; - -static NODE_DESCRIPTOR_FILE: &str = "node_v1alpha2_descriptor.bin"; -static STARKNET_DESCRIPTOR_FILE: &str = "starknet_v1alpha2_descriptor.bin"; - -fn main() -> Result<(), Box> { - let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); - println!("cargo:rerun-if-changed=proto/node/v1alpha2"); - println!("cargo:rerun-if-changed=proto/starknet/v1alpha2"); - println!("cargo:rerun-if-changed=proto/quota/v1"); - - tonic_build::configure() - .build_client(true) - .build_server(true) - .protoc_arg("--experimental_allow_proto3_optional") - .file_descriptor_set_path(out_dir.join(NODE_DESCRIPTOR_FILE)) - .compile(&["proto/node/v1alpha2/stream.proto"], &["proto/node"])?; - - tonic_build::configure() - .build_client(true) - .build_server(true) - .protoc_arg("--experimental_allow_proto3_optional") - .file_descriptor_set_path(out_dir.join(STARKNET_DESCRIPTOR_FILE)) - .compile_well_known_types(true) - .extern_path(".google.protobuf", "::pbjson_types") - .compile( - &[ - "proto/starknet/v1alpha2/starknet.proto", - "proto/starknet/v1alpha2/filter.proto", - ], - &["proto/starknet"], - )?; - - // only add jsonpb definitions for finality. cursor is implemented manually. - let node_description_set = std::fs::read(out_dir.join(NODE_DESCRIPTOR_FILE))?; - pbjson_build::Builder::new() - .register_descriptors(&node_description_set)? - .exclude([".apibara.node.v1alpha2.Cursor"]) - .build(&[".apibara"])?; - - // add jsonpb definitions, but only for the data types - let starknet_description_set = std::fs::read(out_dir.join(STARKNET_DESCRIPTOR_FILE))?; - pbjson_build::Builder::new() - .register_descriptors(&starknet_description_set)? - .exclude([".apibara.starknet.v1alpha2.FieldElement"]) - .build(&[".apibara"])?; - - // we only need the client for the quota service. 
- tonic_build::configure() - .build_client(true) - .build_server(false) - .protoc_arg("--experimental_allow_proto3_optional") - .compile(&["proto/quota/v1/quota.proto"], &["proto/quota"])?; - - Ok(()) -} diff --git a/core/proto/node/v1alpha2/stream.proto b/core/proto/node/v1alpha2/stream.proto deleted file mode 100644 index 6c236c93..00000000 --- a/core/proto/node/v1alpha2/stream.proto +++ /dev/null @@ -1,94 +0,0 @@ -// Apibara Stream service. -syntax = "proto3"; - -package apibara.node.v1alpha2; - -service Stream { - // Stream data from the node (bi-directional). - rpc StreamData(stream StreamDataRequest) returns (stream StreamDataResponse); - // Stream data from the node. - rpc StreamDataImmutable(StreamDataRequest) - returns (stream StreamDataResponse); - // Get DNA service status. - rpc Status(StatusRequest) returns (StatusResponse); -} - -// Request data to be streamed. -message StreamDataRequest { - // Used by the client to uniquely identify a stream. - // All streams use `stream_id = 0` by default. - optional uint64 stream_id = 1; - // How many items to send in a single response. - optional uint64 batch_size = 2; - // Start streaming from the provided cursor. - Cursor starting_cursor = 3; - // Return data with the specified finality. - // If not specified, defaults to `DATA_STATUS_ACCEPTED`. - optional DataFinality finality = 4; - // Return data according to the stream-specific filter. - bytes filter = 5; - // Combine multiple filters in the same stream. - repeated bytes multi_filter = 6; -} - -// Contains the data requested from the client. -message StreamDataResponse { - // The stream id. - uint64 stream_id = 1; - oneof message { - Invalidate invalidate = 2; - Data data = 3; - Heartbeat heartbeat = 4; - } -} - -// A cursor over the stream content. -message Cursor { - // Key used for ordering messages in the stream. - uint64 order_key = 1; - // Key used to discriminate branches in the stream. - bytes unique_key = 2; -} - -// Data finality. 
-enum DataFinality { - DATA_STATUS_UNKNOWN = 0; - // Data was received, but is not part of the canonical chain yet. - DATA_STATUS_PENDING = 1; - // Data is now part of the canonical chain, but could still be invalidated. - DATA_STATUS_ACCEPTED = 2; - // Data is finalized and cannot be invalidated. - DATA_STATUS_FINALIZED = 3; -} - -// Invalidate data after the given cursor. -message Invalidate { - // The cursor of the message before the now invalid data. - Cursor cursor = 1; -} - -// A batch of data. -message Data { - // Cursor of the last item in the batch. - Cursor end_cursor = 1; - // The finality status of the data in the batch. - DataFinality finality = 2; - // The stream data. - repeated bytes data = 3; - // Cursor used to produced the batch. - Cursor cursor = 4; -} - -// Sent to clients to check if stream is still connected. -message Heartbeat {} - -// Request for the `Status` method. -message StatusRequest {} - -// Response for the `Status` method. -message StatusResponse { - // The current head of the chain. - Cursor current_head = 1; - // The last cursor that was ingested by the node. 
- Cursor last_ingested = 2; -} diff --git a/core/proto/quota/v1/quota.proto b/core/proto/quota/v1/quota.proto deleted file mode 100644 index 07ae7321..00000000 --- a/core/proto/quota/v1/quota.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package apibara.quota.v1; - -service Quota { - rpc Check(CheckRequest) returns (CheckResponse) {}; - rpc UpdateAndCheck(UpdateAndCheckRequest) returns (UpdateAndCheckResponse) {}; -} - -enum QuotaStatus { - QUOTA_STATUS_UNKNOWN = 0; - QUOTA_STATUS_OK = 1; - QUOTA_STATUS_EXCEEDED = 2; -} - -message CheckRequest { - string team_name = 1; - optional string client_name = 2; - string network = 3; -} - -message CheckResponse { - QuotaStatus status = 1; -} - -message UpdateAndCheckRequest { - string team_name = 1; - optional string client_name = 2; - string network = 3; - uint64 data_units = 4; -} - -message UpdateAndCheckResponse { - QuotaStatus status = 1; -} - diff --git a/core/proto/starknet/v1alpha2/filter.proto b/core/proto/starknet/v1alpha2/filter.proto deleted file mode 100644 index 95372757..00000000 --- a/core/proto/starknet/v1alpha2/filter.proto +++ /dev/null @@ -1,194 +0,0 @@ -syntax = "proto3"; - -package apibara.starknet.v1alpha2; - -import "v1alpha2/types.proto"; - -// Filter describing what data to return for each block. -message Filter { - // Header information. - HeaderFilter header = 1; - // Transactions. - repeated TransactionFilter transactions = 2; - // State update. - StateUpdateFilter state_update = 3; - // Emitted events. - repeated EventFilter events = 4; - // Messages from L2 to L1. - repeated L2ToL1MessageFilter messages = 5; -} - -// Filter header. -message HeaderFilter { - // If true, only include headers if any other filter matches. - bool weak = 1; -} - -// Filter transactions. -// -// An empty transaction filter matches _any_ transaction. 
-message TransactionFilter { - oneof filter { - InvokeTransactionV0Filter invoke_v0 = 1; - InvokeTransactionV1Filter invoke_v1 = 2; - DeployTransactionFilter deploy = 3; - DeclareTransactionFilter declare = 4; - L1HandlerTransactionFilter l1_handler = 5; - DeployAccountTransactionFilter deploy_account = 6; - InvokeTransactionV3Filter invoke_v3 = 8; - } - - // Include reverted transactions. - bool include_reverted = 7; -} - -// Receive invoke transactions, v0 -message InvokeTransactionV0Filter { - // Filter by contract address. - FieldElement contract_address = 1; - // Filter by selector. - FieldElement entry_point_selector = 2; - // Filter by calldata prefix. - repeated FieldElement calldata = 3; -} - -// Receive invoke transactions, v1 -message InvokeTransactionV1Filter { - // Filter by sender address. - FieldElement sender_address = 1; - // Filter by calldata prefix. - repeated FieldElement calldata = 3; -} - -// Receive invoke transactions, v3 -message InvokeTransactionV3Filter { - // Filter by sender address. - FieldElement sender_address = 1; - // Filter by calldata prefix. - repeated FieldElement calldata = 2; -} - -// Receive deploy transactions. -message DeployTransactionFilter { - // Filter by contract address salt. - FieldElement contract_address_salt = 1; - // Filter by class hash. - FieldElement class_hash = 2; - // Filter by calldata prefix. - repeated FieldElement constructor_calldata = 4; -} - -// Receive declare transactions. -message DeclareTransactionFilter { - // Filter by class hash. - FieldElement class_hash = 1; - // Filter by sender address. - FieldElement sender_address = 2; -} - -// Receive l1 handler transactions. -message L1HandlerTransactionFilter { - // Filter by contract address. - FieldElement contract_address = 1; - // Filter by selector. - FieldElement entry_point_selector = 2; - // Filter by calldata prefix. - repeated FieldElement calldata = 3; -} - -// Receive deploy account transactions. 
-message DeployAccountTransactionFilter { - // Filter by contract address salt. - FieldElement contract_address_salt = 1; - // Filter by class hash. - FieldElement class_hash = 2; - // Filter by calldata prefix. - repeated FieldElement constructor_calldata = 4; -} - -// Filter L2 to L1 messages. -message L2ToL1MessageFilter { - // Filter by destination address. - FieldElement to_address = 1; - // Filter payloads that prefix-match the given data. - repeated FieldElement payload = 2; - // Include messages sent by reverted transactions. - bool include_reverted = 3; -} - -// Filter events. -message EventFilter { - // Filter by contract emitting the event. - FieldElement from_address = 1; - // Filter keys that prefix-match the given data. - repeated FieldElement keys = 2; - // Filter data that prefix-match the given data. - repeated FieldElement data = 3; - // Include events emitted by reverted transactions. - optional bool include_reverted = 4; - // Include the transaction that emitted the event. Defaults to true. - optional bool include_transaction = 5; - // Include the receipt of the transaction that emitted the event. Defaults to - // true. - optional bool include_receipt = 6; -} - -// Filter state update data. -message StateUpdateFilter { - // Filter storage changes. - repeated StorageDiffFilter storage_diffs = 1; - // Filter declared contracts. - repeated DeclaredContractFilter declared_contracts = 2; - // Filter deployed contracts. - repeated DeployedContractFilter deployed_contracts = 3; - // Filter nonces updates. - repeated NonceUpdateFilter nonces = 4; - // Filter declared classes. - repeated DeclaredClassFilter declared_classes = 5; - // Filter replaced classes. - repeated ReplacedClassFilter replaced_classes = 6; -} - -// Filter storage changes. -message StorageDiffFilter { - // Filter by contract address. - FieldElement contract_address = 1; -} - -// Filter declared contracts. -message DeclaredContractFilter { - // Filter by class hash. 
- FieldElement class_hash = 1; -} - -// Filter declared classes. -message DeclaredClassFilter { - // Filter by class hash. - FieldElement class_hash = 1; - // Filter by compiled class hash. - FieldElement compiled_class_hash = 2; -} - -// Filter replaced classes. -message ReplacedClassFilter { - // Filter by contract address. - FieldElement contract_address = 1; - // Filter by class hash. - FieldElement class_hash = 2; -} - -// Filter deployed contracts. -message DeployedContractFilter { - // Filter by contract address. - FieldElement contract_address = 1; - // Filter by class hash. - FieldElement class_hash = 2; -} - -// Filter nonce updates. -message NonceUpdateFilter { - // Filter by contract address. - FieldElement contract_address = 1; - // Filter by new nonce value. - FieldElement nonce = 2; -} diff --git a/core/proto/starknet/v1alpha2/starknet.proto b/core/proto/starknet/v1alpha2/starknet.proto deleted file mode 100644 index 82f222e0..00000000 --- a/core/proto/starknet/v1alpha2/starknet.proto +++ /dev/null @@ -1,479 +0,0 @@ -// Apibara StarkNet Support -syntax = "proto3"; - -package apibara.starknet.v1alpha2; - -import "google/protobuf/timestamp.proto"; -import "v1alpha2/types.proto"; - -// A StarkNet block. -message Block { - // Block status. - BlockStatus status = 1; - // Block header. - BlockHeader header = 2; - // Transactions in the block. - repeated TransactionWithReceipt transactions = 3; - // State update caused by the block. - StateUpdate state_update = 4; - // Events emitted in the block. - repeated EventWithTransaction events = 5; - // Messages to L1 sent in the block. - repeated L2ToL1MessageWithTransaction l2_to_l1_messages = 6; - // Whether the block contains data. - bool empty = 7; -} - -// Block header. -message BlockHeader { - // Hash of the block. - FieldElement block_hash = 1; - // Hash of the block's parent. - FieldElement parent_block_hash = 2; - // Block height. - uint64 block_number = 3; - // Sequencer address. 
- FieldElement sequencer_address = 4; - // New state root after the block. - FieldElement new_root = 5; - // Timestamp when block was produced. - google.protobuf.Timestamp timestamp = 6; - // Starknet version. - string starknet_version = 7; - // Price of L1 gas in the block. - ResourcePrice l1_gas_price = 8; - // Price of L1 data gas in the block. - ResourcePrice l1_data_gas_price = 9; - // L1 data availability mode. - L1DataAvailabilityMode l1_data_availability_mode = 10; -} - -enum L1DataAvailabilityMode { - // Unknown DA. - L1_DATA_AVAILABILITY_MODE_UNSPECIFIED = 0; - // Data published via blobs. - L1_DATA_AVAILABILITY_MODE_BLOB = 1; - // Data published via calldata. - L1_DATA_AVAILABILITY_MODE_CALLDATA = 2; -} - -// Status of a block. -enum BlockStatus { - // Unknown block status. - BLOCK_STATUS_UNSPECIFIED = 0; - // Block not accepted yet. - BLOCK_STATUS_PENDING = 1; - // Block accepted on L2. - BLOCK_STATUS_ACCEPTED_ON_L2 = 2; - // Block finalized on L1. - BLOCK_STATUS_ACCEPTED_ON_L1 = 3; - // Block was rejected and is not part of the canonical chain anymore. - BLOCK_STATUS_REJECTED = 4; -} - -// A transaction with its receipt. -message TransactionWithReceipt { - // The transaction - Transaction transaction = 1; - // The transaction receipt. - TransactionReceipt receipt = 2; -} - -// A transaction. -message Transaction { - // Common transaction metadata. - TransactionMeta meta = 1; - oneof transaction { - // Transaction invoking a smart contract, V0. - InvokeTransactionV0 invoke_v0 = 2; - // Transaction invoking a smart contract, V1. - InvokeTransactionV1 invoke_v1 = 3; - // Transaction deploying a new smart contract, V1. - DeployTransaction deploy = 4; - // Transaction declaring a smart contract. - DeclareTransaction declare = 5; - // Transaction handling a message from L1. - L1HandlerTransaction l1_handler = 6; - // Transaction deploying a new account. - DeployAccountTransaction deploy_account = 7; - // Transaction deploying a new smart contract, V3. 
- DeployAccountTransactionV3 deploy_account_v3 = 8; - // Transaction invoking a smart contract, V3. - InvokeTransactionV3 invoke_v3 = 9; - // Transaction declaring a smart contract, V3. - DeclareTransactionV3 declare_v3 = 10; - } -} - -// Common transaction metadata. -message TransactionMeta { - // Transaction hash. - FieldElement hash = 1; - // Maximum fee to be paid. - FieldElement max_fee = 2; - // Signature by the user. - repeated FieldElement signature = 3; - // Nonce. - FieldElement nonce = 4; - // Version. - uint64 version = 5; - // Transaction resources. - ResourceBoundsMapping resource_bounds = 6; - // Tip to the sequencer. - uint64 tip = 7; - // Data passed to the paymaster. - repeated FieldElement paymaster_data = 8; - // The storage domain of the account's nonce. - DataAvailabilityMode nonce_data_availability_mode = 9; - // The storage domain of the account's balance from which fee will be charged. - DataAvailabilityMode fee_data_availability_mode = 10; - // Index of the transaction in the block. - uint64 transaction_index = 11; -} - -// Transaction invoking a smart contract, V0. -message InvokeTransactionV0 { - // Target contract address. - FieldElement contract_address = 1; - // Selector of the function being invoked. - FieldElement entry_point_selector = 2; - // Raw calldata. - repeated FieldElement calldata = 3; -} - -// Transaction invoking a smart contract, V1. -message InvokeTransactionV1 { - // Address sending the transaction. - FieldElement sender_address = 1; - // Raw calldata. - repeated FieldElement calldata = 2; -} - -// Transaction invoking a smart contract, V3. -message InvokeTransactionV3 { - // Address sending the transaction. - FieldElement sender_address = 1; - // Raw calldata. - repeated FieldElement calldata = 2; - // Data passed to the account deployment. - repeated FieldElement account_deployment_data = 3; -} - -// Transaction deploying a new smart contract, V1. 
-message DeployTransaction { - // Raw calldata passed to the constructor. - repeated FieldElement constructor_calldata = 2; - // Salt used when computing the contract's address. - FieldElement contract_address_salt = 3; - // Hash of the class being deployed. - FieldElement class_hash = 4; -} - -// Transaction declaring a smart contract. -message DeclareTransaction { - // Class hash. - FieldElement class_hash = 1; - // Address of the account declaring the class. - FieldElement sender_address = 2; - // The hash of the cairo assembly resulting from the sierra compilation. - FieldElement compiled_class_hash = 3; -} - -// Transaction declaring a smart contract. -message DeclareTransactionV3 { - // Class hash. - FieldElement class_hash = 1; - // Address of the account declaring the class. - FieldElement sender_address = 2; - // The hash of the cairo assembly resulting from the sierra compilation. - FieldElement compiled_class_hash = 3; - // Data passed to the account deployment. - repeated FieldElement account_deployment_data = 4; -} - -// Transaction handling a message from L1. -message L1HandlerTransaction { - // Target contract address. - FieldElement contract_address = 2; - // Selector of the function being invoked. - FieldElement entry_point_selector = 3; - // Raw calldata. - repeated FieldElement calldata = 4; -} - -// Transaction deploying a new account. -message DeployAccountTransaction { - // Raw calldata passed to the constructor. - repeated FieldElement constructor_calldata = 2; - // Salt used when computing the contract's address. - FieldElement contract_address_salt = 3; - // Hash of the class being deployed. - FieldElement class_hash = 4; -} - -// Transaction deploying a new smart contract, V3. -message DeployAccountTransactionV3 { - // Raw calldata passed to the constructor. - repeated FieldElement constructor_calldata = 1; - // Salt used when computing the contract's address. - FieldElement contract_address_salt = 2; - // Hash of the class being deployed. 
- FieldElement class_hash = 3; -} - -// Transaction execution status. -enum ExecutionStatus { - // Unknown execution status. - EXECUTION_STATUS_UNSPECIFIED = 0; - // Transaction succeeded. - EXECUTION_STATUS_SUCCEEDED = 1; - // Transaction reverted. - EXECUTION_STATUS_REVERTED = 2; -} - -// Result of the execution of a transaction. -// -// This message only contains the receipt data, if you also need the -// transaction, request a `Transaction`. -message TransactionReceipt { - // Hash of the transaction. - FieldElement transaction_hash = 1; - // Transaction's indexe in the list of transactions in a block. - uint64 transaction_index = 2; - // Feed paid. - FieldElement actual_fee = 3; - // Messages sent to L1 in the transactions. - repeated L2ToL1Message l2_to_l1_messages = 4; - // Events emitted in the transaction. - repeated Event events = 5; - // Address of the contract that was created by the transaction. - FieldElement contract_address = 6; - // Transaction execution status. - ExecutionStatus execution_status = 7; - // The reason why the transaction reverted. - string revert_reason = 8; - // Feed paid. - FeePayment actual_fee_paid = 9; - // The resources consumed by the transaction. - ExecutionResources execution_resources = 10; -} - -// Message sent from L2 to L1 together with its transaction and receipt. -message L2ToL1MessageWithTransaction { - // The transaction that sent this message. - Transaction transaction = 1; - // The transaction receipt. - TransactionReceipt receipt = 2; - // The message. - L2ToL1Message message = 3; -} - -// Message sent from L2 to L1. -message L2ToL1Message { - // Destination address (L1 contract). - FieldElement to_address = 3; - // Data contained in the message. - repeated FieldElement payload = 4; - // Index in the transaction receipt. - uint64 index = 5; - // Sender address (L2 contract). - FieldElement from_address = 6; -} - -// Event emitted by a transaction, together with its transaction and receipt. 
-message EventWithTransaction { - // The transaction emitting the event. - Transaction transaction = 1; - // The transaction receipt. - TransactionReceipt receipt = 2; - // The event. - Event event = 3; -} - -// Event emitted by a transaction. -message Event { - // Address of the smart contract emitting the event. - FieldElement from_address = 1; - // Event key. - repeated FieldElement keys = 2; - // Event data. - repeated FieldElement data = 3; - // Index in the transaction receipt. - uint64 index = 4; -} - -// State update. -message StateUpdate { - // New state root. - FieldElement new_root = 1; - // Previous state root. - FieldElement old_root = 2; - // State difference. - StateDiff state_diff = 3; -} - -// Difference in state between blocks. -message StateDiff { - // Storage differences. - repeated StorageDiff storage_diffs = 1; - // Contracts declared. - repeated DeclaredContract declared_contracts = 2; - // Contracts deployed. - repeated DeployedContract deployed_contracts = 3; - // Nonces updated. - repeated NonceUpdate nonces = 4; - // Classes declared. - repeated DeclaredClass declared_classes = 5; - // Replaced declared. - repeated ReplacedClass replaced_classes = 6; -} - -// Difference in storage values for a contract. -message StorageDiff { - // The contract address. - FieldElement contract_address = 1; - // Entries that changed. - repeated StorageEntry storage_entries = 2; -} - -// Storage entry. -message StorageEntry { - // Storage location. - FieldElement key = 1; - // Storage value. - FieldElement value = 2; -} - -// Contract declared. -message DeclaredContract { - // Class hash of the newly declared contract. - FieldElement class_hash = 1; -} - -// Class declared. -message DeclaredClass { - // Class hash of the newly declared class. - FieldElement class_hash = 1; - // Hash of the cairo assembly resulting from the sierra compilation. - FieldElement compiled_class_hash = 2; -} - -// Class replaced. 
-message ReplacedClass { - // The address of the contract whose class was replaced. - FieldElement contract_address = 1; - // The new class hash. - FieldElement class_hash = 2; -} - -// Contract deployed. -message DeployedContract { - // Address of the newly deployed contract. - FieldElement contract_address = 1; - // Class hash of the deployed contract. - FieldElement class_hash = 2; -} - -// Nonce update. -message NonceUpdate { - // Contract address. - FieldElement contract_address = 1; - // New nonce value. - FieldElement nonce = 2; -} - -// Price of a unit of a resource. -message ResourcePrice { - // Price in fri (10^-18 strk). - FieldElement price_in_fri = 1; - // Price in wei (10^-18 eth). - FieldElement price_in_wei = 2; -} - -// A Starknet fee payment. -message FeePayment { - // Amount paid. - FieldElement amount = 1; - // Unit of the amount. - PriceUnit unit = 2; -} - -// Price unit. -enum PriceUnit { - // Unknown price unit. - PRICE_UNIT_UNSPECIFIED = 0; - // WEI. - PRICE_UNIT_WEI = 1; - // FRI. - PRICE_UNIT_FRI = 2; -} - -// Execution resources. -message ExecutionResources { - // Computation resources. - ComputationResources computation = 1; - // Data availability resources. - DataAvailabilityResources data_availability = 2; -} - -// Computation resources. -message ComputationResources { - // The number of Cairo steps used. - uint64 steps = 1; - // The number of unused memory cells. - uint64 memory_holes = 2; - // The number of RANGE_CHECK builtin instances. - uint64 range_check_builtin_applications = 3; - // The number of Pedersen builtin instances. - uint64 pedersen_builtin_applications = 4; - // The number of Poseidon builtin instances. - uint64 poseidon_builtin_applications = 5; - // The number of EC_OP builtin instances. - uint64 ec_op_builtin_applications = 6; - // The number of ECDSA builtin instances. - uint64 ecdsa_builtin_applications = 7; - // The number of BITWISE builtin instances. 
- uint64 bitwise_builtin_applications = 8; - // The number of KECCAK builtin instances. - uint64 keccak_builtin_applications = 9; - // The number of accesses to the segment arena. - uint64 segment_arena_builtin = 10; -} - -message DataAvailabilityResources { - // The gas consumed by this transaction's data, 0 if it uses data gas for DA. - uint64 l1_gas = 1; - // The data gas consumed by this transaction's data, 0 if it uses gas for DA. - uint64 l1_data_gas = 2; -} - -message ResourceBoundsMapping { - // Maximum amount and price of L1 gas. - ResourceBounds l1_gas = 1; - // Maximum amount and price of L2 gas. - ResourceBounds l2_gas = 2; -} - -message ResourceBounds { - // The maximum amount of resources that can be consumed by a transaction. - uint64 max_amount = 1; - /// The max price per unit of resource. - Uint128 max_price_per_unit = 2; -} - -message Uint128 { - // The low 64 bits of the number. - uint64 low = 1; - // The high 64 bits of the number. - uint64 high = 2; -} - -// DA mode. -enum DataAvailabilityMode { - // Unknown DA. - DATA_AVAILABILITY_MODE_UNSPECIFIED = 0; - // L1. - DATA_AVAILABILITY_MODE_L1 = 1; - // L2. - DATA_AVAILABILITY_MODE_L2 = 2; -} diff --git a/core/proto/starknet/v1alpha2/types.proto b/core/proto/starknet/v1alpha2/types.proto deleted file mode 100644 index e781660b..00000000 --- a/core/proto/starknet/v1alpha2/types.proto +++ /dev/null @@ -1,13 +0,0 @@ -syntax = "proto3"; - -package apibara.starknet.v1alpha2; - -// StarkNet field element. -// -// Encoded as 4 packed uint64 -message FieldElement { - fixed64 lo_lo = 1; - fixed64 lo_hi = 2; - fixed64 hi_lo = 3; - fixed64 hi_hi = 4; -} diff --git a/core/src/filter.rs b/core/src/filter.rs deleted file mode 100644 index 42db5c45..00000000 --- a/core/src/filter.rs +++ /dev/null @@ -1,7 +0,0 @@ -use prost::Message; -use serde::{de::DeserializeOwned, ser::Serialize}; - -pub trait Filter: Default + Message + Clone + DeserializeOwned + Serialize { - /// Merges the given filter into this filter. 
- fn merge_filter(&mut self, other: Self); -} diff --git a/core/src/lib.rs b/core/src/lib.rs deleted file mode 100644 index 753ef283..00000000 --- a/core/src/lib.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod filter; -pub mod node; -pub mod quota; -pub mod starknet; -pub mod stream; diff --git a/core/src/quota.rs b/core/src/quota.rs deleted file mode 100644 index 0e55bdb7..00000000 --- a/core/src/quota.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod v1 { - tonic::include_proto!("apibara.quota.v1"); -} diff --git a/core/src/starknet/data.rs b/core/src/starknet/data.rs deleted file mode 100644 index ce9ca0f1..00000000 --- a/core/src/starknet/data.rs +++ /dev/null @@ -1,219 +0,0 @@ -use std::{ - fmt::Display, - hash::{Hash, Hasher}, -}; - -use serde::{ - de::{Deserialize, Deserializer}, - ser::{Serialize, Serializer}, -}; -use starknet::core::types::{FieldElement as Felt, FromByteArrayError}; - -use super::proto::v1alpha2::*; - -impl BlockStatus { - pub fn is_finalized(&self) -> bool { - *self == BlockStatus::AcceptedOnL1 - } - - pub fn is_accepted(&self) -> bool { - *self == BlockStatus::AcceptedOnL2 - } - - pub fn is_rejected(&self) -> bool { - *self == BlockStatus::Rejected - } -} - -#[derive(Debug, thiserror::Error)] -pub enum FieldElementDecodeError { - #[error("missing 0x prefix")] - MissingPrefix, - #[error("field element size is invalid")] - InvalidSize, - #[error("hex decode error: {0}")] - DecodeError(#[from] hex::FromHexError), -} - -impl FieldElement { - /// Returns a new field element representing the given u64 value. - pub fn from_u64(value: u64) -> FieldElement { - FieldElement { - lo_lo: 0, - lo_hi: 0, - hi_lo: 0, - hi_hi: value, - } - } - - /// Returns a new field element from the raw byte representation. 
- pub fn from_bytes(bytes: &[u8; 32]) -> Self { - let lo_lo = u64::from_be_bytes([ - bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7], - ]); - let lo_hi = u64::from_be_bytes([ - bytes[8], bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], bytes[14], bytes[15], - ]); - let hi_lo = u64::from_be_bytes([ - bytes[16], bytes[17], bytes[18], bytes[19], bytes[20], bytes[21], bytes[22], bytes[23], - ]); - let hi_hi = u64::from_be_bytes([ - bytes[24], bytes[25], bytes[26], bytes[27], bytes[28], bytes[29], bytes[30], bytes[31], - ]); - - FieldElement { - lo_lo, - lo_hi, - hi_lo, - hi_hi, - } - } - - /// Returns a new field element from the raw byte representation. - pub fn from_slice(bytes: &[u8]) -> Result { - // number is too big - let size = bytes.len(); - if size > 32 { - return Err(FieldElementDecodeError::InvalidSize); - } - let mut bytes_array = [0u8; 32]; - bytes_array[32 - size..].copy_from_slice(bytes); - Ok(FieldElement::from_bytes(&bytes_array)) - } - - pub fn from_hex(s: &str) -> Result { - // must be at least 0x - if !s.starts_with("0x") { - return Err(FieldElementDecodeError::MissingPrefix); - } - - // hex requires the string to be even-sized. If it's not, we copy it and add a leading 0. - let bytes = if s.len() % 2 == 1 { - let even_sized = format!("0{}", &s[2..]); - hex::decode(even_sized)? - } else { - // skip 0x prefix - hex::decode(&s[2..])? 
- }; - - Self::from_slice(&bytes) - } - - pub fn to_bytes(&self) -> [u8; 32] { - let lo_lo = self.lo_lo.to_be_bytes(); - let lo_hi = self.lo_hi.to_be_bytes(); - let hi_lo = self.hi_lo.to_be_bytes(); - let hi_hi = self.hi_hi.to_be_bytes(); - [ - lo_lo[0], lo_lo[1], lo_lo[2], lo_lo[3], lo_lo[4], lo_lo[5], lo_lo[6], lo_lo[7], - lo_hi[0], lo_hi[1], lo_hi[2], lo_hi[3], lo_hi[4], lo_hi[5], lo_hi[6], lo_hi[7], - hi_lo[0], hi_lo[1], hi_lo[2], hi_lo[3], hi_lo[4], hi_lo[5], hi_lo[6], hi_lo[7], - hi_hi[0], hi_hi[1], hi_hi[2], hi_hi[3], hi_hi[4], hi_hi[5], hi_hi[6], hi_hi[7], - ] - } - - /// Returns the field element as an hex string with 0x prefix. - pub fn to_hex(&self) -> String { - format!("0x{}", hex::encode(self.to_bytes())) - } -} - -impl Display for FieldElement { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.to_hex()) - } -} - -impl Hash for FieldElement { - fn hash(&self, state: &mut H) { - self.to_bytes().hash(state); - } -} - -impl Serialize for FieldElement { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_hex()) - } -} - -impl<'de> Deserialize<'de> for FieldElement { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - let fe = FieldElement::from_hex(&s).map_err(serde::de::Error::custom)?; - Ok(fe) - } -} - -impl TryFrom<&FieldElement> for Felt { - type Error = FromByteArrayError; - - fn try_from(value: &FieldElement) -> Result { - Felt::from_bytes_be(&value.to_bytes()) - } -} - -impl From for FieldElement { - fn from(felt: Felt) -> Self { - (&felt).into() - } -} - -impl From<&Felt> for FieldElement { - fn from(felt: &Felt) -> Self { - let bytes = felt.to_bytes_be(); - FieldElement::from_bytes(&bytes) - } -} - -#[cfg(test)] -mod tests { - use quickcheck_macros::quickcheck; - - use starknet::core::types::FieldElement as Felt; - - use crate::starknet::v1alpha2::FieldElement; - - 
#[quickcheck] - fn test_felt_from_u64(num: u64) { - let felt = FieldElement::from_u64(num); - let bytes = felt.to_bytes(); - // since it's a u64 it will never use bytes 0..24 - assert_eq!(bytes[0..24], [0; 24]); - - let back = FieldElement::from_bytes(&bytes); - assert_eq!(back, felt); - - let as_hex = felt.to_hex(); - let back_hex = FieldElement::from_hex(&as_hex).unwrap(); - assert_eq!(felt, back_hex); - } - - #[test] - fn test_conversion_to_felt() { - let two = Felt::MAX; - let felt: FieldElement = two.into(); - assert_eq!(felt.lo_lo, 576460752303423505); - assert_eq!(felt.lo_hi, 0); - assert_eq!(felt.hi_lo, 0); - assert_eq!(felt.hi_hi, 0); - - let as_hex = felt.to_hex(); - let back = FieldElement::from_hex(&as_hex).unwrap(); - assert_eq!(felt, back); - } - - #[test] - fn test_from_hex() { - let felt = FieldElement::from_hex("0x1").unwrap(); - assert_eq!(felt.lo_lo, 0); - assert_eq!(felt.lo_hi, 0); - assert_eq!(felt.hi_lo, 0); - assert_eq!(felt.hi_hi, 1); - } -} diff --git a/core/src/starknet/filter.rs b/core/src/starknet/filter.rs deleted file mode 100644 index 014ca6cc..00000000 --- a/core/src/starknet/filter.rs +++ /dev/null @@ -1,683 +0,0 @@ -use super::proto::v1alpha2::*; -use crate::filter::Filter as FilterTrait; - -impl HeaderFilter { - /// Create an header filter that always matches an header. - pub fn new() -> Self { - HeaderFilter { weak: false } - } - - /// Create an header filter that returns an header only if other filters match. - pub fn weak() -> Self { - HeaderFilter { weak: true } - } -} - -impl Filter { - /// Configure filter header. - pub fn with_header(&mut self, header: HeaderFilter) -> &mut Self { - self.header = Some(header); - self - } - - /// With specific state update. - pub fn with_state_update(&mut self, state_udpate: StateUpdateFilter) -> &mut Self { - self.state_update = Some(state_udpate); - self - } - - /// Add event to subscribe to. 
- pub fn add_event(&mut self, closure: F) -> &mut Self - where - F: Fn(EventFilter) -> EventFilter, - { - self.events.push(closure(EventFilter::default())); - self - } - - /// Add transaction to filter. - pub fn add_transaction(&mut self, closure: F) -> &mut Self - where - F: Fn(TransactionFilter) -> TransactionFilter, - { - self.transactions - .push(closure(TransactionFilter::default())); - self - } - - /// Add message to filter. - pub fn add_message(&mut self, closure: F) -> &mut Self - where - F: Fn(L2ToL1MessageFilter) -> L2ToL1MessageFilter, - { - self.messages.push(closure(L2ToL1MessageFilter::default())); - self - } - - /// Build final version of Filter - pub fn build(&mut self) -> Self { - // As the ::prost::Message already impl Default trait and doesn't seems to be overridable - // this a workaround to set the default value. - // HeaderFilter needs to be set to a value in order to correctly stream data - if self.header.is_none() { - self.header = Some(HeaderFilter::weak()); - } - self.clone() - } -} - -impl TransactionFilter { - /// Create `InvokeTransactionV0Filter` from `TransactionFilter` - pub fn invoke_transaction_v0(&mut self, closure: F) -> &mut Self - where - F: Fn(InvokeTransactionV0Filter) -> InvokeTransactionV0Filter, - { - self.filter = Some(transaction_filter::Filter::InvokeV0(closure( - InvokeTransactionV0Filter::default(), - ))); - self - } - - /// Create `InvokeTransactionV1Filter` from `TransactionFilter` - pub fn invoke_transaction_v1(&mut self, closure: F) -> &mut Self - where - F: Fn(InvokeTransactionV1Filter) -> InvokeTransactionV1Filter, - { - self.filter = Some(transaction_filter::Filter::InvokeV1(closure( - InvokeTransactionV1Filter::default(), - ))); - self - } - - /// Create `DeployTransactionFilter` from `TransactionFilter` - pub fn deploy_transaction(&mut self, closure: F) -> &mut Self - where - F: Fn(DeployTransactionFilter) -> DeployTransactionFilter, - { - self.filter = Some(transaction_filter::Filter::Deploy(closure( - 
DeployTransactionFilter::default(), - ))); - self - } - - /// Create `DeclareTransactionFilter` from `TransactionFilter` - pub fn declare_transaction(&mut self, closure: F) -> &mut Self - where - F: Fn(DeclareTransactionFilter) -> DeclareTransactionFilter, - { - self.filter = Some(transaction_filter::Filter::Declare(closure( - DeclareTransactionFilter::default(), - ))); - self - } - - /// Create `L1HandlerTransactionFilter` from `TransactionFilter` - pub fn l1_handler_transaction(&mut self, closure: F) -> &mut Self - where - F: Fn(L1HandlerTransactionFilter) -> L1HandlerTransactionFilter, - { - self.filter = Some(transaction_filter::Filter::L1Handler(closure( - L1HandlerTransactionFilter::default(), - ))); - self - } - - /// Create `DeployAccountTransactionFilter` from `TransactionFilter` - pub fn deploy_account_transaction(&mut self, closure: F) -> &mut Self - where - F: Fn(DeployAccountTransactionFilter) -> DeployAccountTransactionFilter, - { - self.filter = Some(transaction_filter::Filter::DeployAccount(closure( - DeployAccountTransactionFilter::default(), - ))); - self - } - - /// Builds final `TransactionFilter` - pub fn build(&mut self) -> Self { - self.clone() - } -} - -impl InvokeTransactionV0Filter { - /// Filter transaction with contract address. - pub fn with_contract_address(mut self, address: FieldElement) -> Self { - self.contract_address = Some(address); - self - } - - /// Filter with transaction selector. - pub fn with_entry_point_selector(mut self, selector: FieldElement) -> Self { - self.entry_point_selector = Some(selector); - self - } - - /// Filter with call data. - pub fn with_calldata(mut self, calldata: Vec) -> Self { - self.calldata = calldata; - self - } -} - -impl InvokeTransactionV1Filter { - /// Filter transaction with sender address. - pub fn with_sender_address(mut self, address: FieldElement) -> Self { - self.sender_address = Some(address); - self - } - - /// Filter with call data. 
- pub fn with_calldata(mut self, calldata: Vec) -> Self { - self.calldata = calldata; - self - } -} - -impl DeployTransactionFilter { - /// Filter transaction with contract address salt. - pub fn with_contract_address_salt(mut self, address: FieldElement) -> Self { - self.contract_address_salt = Some(address); - self - } - /// Filter transaction with class hash. - pub fn with_class_hash(mut self, class_hash: FieldElement) -> Self { - self.class_hash = Some(class_hash); - self - } - - /// Filter transaction with constructor calldata. - pub fn with_constructor_calldata(mut self, constructor_calldata: Vec) -> Self { - self.constructor_calldata = constructor_calldata; - self - } -} - -impl DeclareTransactionFilter { - /// Filter transaction with sender address. - pub fn with_sender_address(mut self, address: FieldElement) -> Self { - self.sender_address = Some(address); - self - } - - /// Filter with class hash. - pub fn with_class_hash(mut self, class_hash: FieldElement) -> Self { - self.class_hash = Some(class_hash); - self - } -} - -impl L1HandlerTransactionFilter { - /// Filter transaction with contract address. - pub fn with_contract_address(mut self, address: FieldElement) -> Self { - self.contract_address = Some(address); - self - } - - /// Filter transaction with entry point selector. - pub fn with_entry_point_selector(mut self, selector: FieldElement) -> Self { - self.entry_point_selector = Some(selector); - self - } - - /// Filter transaction with call data. - pub fn with_calldata(mut self, calldata: Vec) -> Self { - self.calldata = calldata; - self - } -} - -impl DeployAccountTransactionFilter { - /// Filter transaction with contract address salt. - pub fn with_contract_address_salt(mut self, address: FieldElement) -> Self { - self.contract_address_salt = Some(address); - self - } - - /// Filter transaction with class hash. 
- pub fn with_class_hash(mut self, class_hash: FieldElement) -> Self { - self.class_hash = Some(class_hash); - self - } - - /// Filter transaction with calldata. - pub fn with_constructor_calldata(mut self, constructor_calldata: Vec) -> Self { - self.constructor_calldata = constructor_calldata; - self - } -} - -impl EventFilter { - /// Filter event from address. - pub fn with_from_address(mut self, address: FieldElement) -> Self { - self.from_address = Some(address); - self - } - - /// Filter event with key. - pub fn with_keys(mut self, keys: Vec) -> Self { - self.keys = keys; - self - } - - /// Filter event with data. - pub fn with_data(mut self, data: Vec) -> Self { - self.data = data; - self - } -} - -impl L2ToL1MessageFilter { - /// Filter message to address. - pub fn with_to_address(mut self, to: FieldElement) -> Self { - self.to_address = Some(to); - self - } - - /// Filter message with payload. - pub fn with_payload(mut self, payload: Vec) -> Self { - self.payload = payload; - self - } -} - -impl StateUpdateFilter { - /// Add storage diff filter to state update filter. - pub fn add_storage_diff(mut self, closure: F) -> Self - where - F: Fn(StorageDiffFilter) -> StorageDiffFilter, - { - self.storage_diffs - .push(closure(StorageDiffFilter::default())); - self - } - - /// Add declared contract filter to state update. - pub fn add_declared_contract(mut self, closure: F) -> Self - where - F: Fn(DeclaredContractFilter) -> DeclaredContractFilter, - { - self.declared_contracts - .push(closure(DeclaredContractFilter::default())); - self - } - - /// Add deployed contract filter to state update. - pub fn add_deployed_contract(mut self, closure: F) -> Self - where - F: Fn(DeployedContractFilter) -> DeployedContractFilter, - { - self.deployed_contracts - .push(closure(DeployedContractFilter::default())); - self - } - - /// Add nonce update filter to state update. 
- pub fn add_nonce_update(mut self, closure: F) -> Self - where - F: Fn(NonceUpdateFilter) -> NonceUpdateFilter, - { - self.nonces.push(closure(NonceUpdateFilter::default())); - self - } -} - -impl StorageDiffFilter { - /// Filter with contract address. - pub fn with_contract_address(mut self, address: FieldElement) -> Self { - self.contract_address = Some(address); - self - } -} - -impl DeclaredContractFilter { - /// Filter with class hash. - pub fn with_class_hash(mut self, address: FieldElement) -> Self { - self.class_hash = Some(address); - self - } -} - -impl DeployedContractFilter { - /// Filter with contract address. - pub fn with_contract_address(mut self, address: FieldElement) -> Self { - self.contract_address = Some(address); - self - } - - /// Filter with class hash. - pub fn with_class_hash(mut self, address: FieldElement) -> Self { - self.class_hash = Some(address); - self - } -} - -impl NonceUpdateFilter { - /// Filter with contract address. - pub fn with_contract_address(mut self, address: FieldElement) -> Self { - self.contract_address = Some(address); - self - } - - /// Filter with nonce. - pub fn with_nonce(mut self, nonce: FieldElement) -> Self { - self.nonce = Some(nonce); - self - } -} - -trait VecMatch { - fn prefix_matches(&self, other: &Self) -> bool; -} - -impl VecMatch for Vec -where - T: PartialEq, -{ - fn prefix_matches(&self, other: &Self) -> bool { - if self.is_empty() { - return true; - } - - if self.len() > other.len() { - return false; - } - - for (a, b) in self.iter().zip(other) { - if a != b { - return false; - } - } - - true - } -} - -/// [Option] extension trait to match values. `None` matches anything. 
-trait FilterMatch { - fn matches(&self, other: &Self) -> bool; -} - -impl FilterMatch for Option { - fn matches(&self, other: &Self) -> bool { - if self.is_none() { - return true; - } - self == other - } -} - -impl TransactionFilter { - pub fn matches(&self, tx: &Transaction) -> bool { - match self.filter.as_ref() { - None => true, - Some(transaction_filter::Filter::InvokeV0(filter)) => filter.matches(tx), - Some(transaction_filter::Filter::InvokeV1(filter)) => filter.matches(tx), - Some(transaction_filter::Filter::InvokeV3(filter)) => filter.matches(tx), - Some(transaction_filter::Filter::Deploy(filter)) => filter.matches(tx), - Some(transaction_filter::Filter::Declare(filter)) => filter.matches(tx), - Some(transaction_filter::Filter::L1Handler(filter)) => filter.matches(tx), - Some(transaction_filter::Filter::DeployAccount(filter)) => filter.matches(tx), - } - } -} - -impl InvokeTransactionV0Filter { - pub fn matches(&self, tx: &Transaction) -> bool { - match tx.transaction.as_ref() { - Some(transaction::Transaction::InvokeV0(tx)) => { - self.contract_address.matches(&tx.contract_address) - && self.entry_point_selector.matches(&tx.entry_point_selector) - && self.calldata.prefix_matches(&tx.calldata) - } - _ => false, - } - } -} - -impl InvokeTransactionV1Filter { - pub fn matches(&self, tx: &Transaction) -> bool { - match tx.transaction.as_ref() { - Some(transaction::Transaction::InvokeV1(tx)) => { - self.sender_address.matches(&tx.sender_address) - && self.calldata.prefix_matches(&tx.calldata) - } - _ => false, - } - } -} - -impl InvokeTransactionV3Filter { - pub fn matches(&self, tx: &Transaction) -> bool { - match tx.transaction.as_ref() { - Some(transaction::Transaction::InvokeV3(tx)) => { - self.sender_address.matches(&tx.sender_address) - && self.calldata.prefix_matches(&tx.calldata) - } - _ => false, - } - } -} - -impl DeployTransactionFilter { - pub fn matches(&self, tx: &Transaction) -> bool { - match tx.transaction.as_ref() { - 
Some(transaction::Transaction::Deploy(tx)) => { - self.class_hash.matches(&tx.class_hash) - && self - .contract_address_salt - .matches(&tx.contract_address_salt) - && self - .constructor_calldata - .prefix_matches(&tx.constructor_calldata) - } - _ => false, - } - } -} - -impl DeclareTransactionFilter { - pub fn matches(&self, tx: &Transaction) -> bool { - match tx.transaction.as_ref() { - Some(transaction::Transaction::Declare(tx)) => { - self.class_hash.matches(&tx.class_hash) - && self.sender_address.matches(&tx.sender_address) - } - _ => false, - } - } -} - -impl L1HandlerTransactionFilter { - pub fn matches(&self, tx: &Transaction) -> bool { - match tx.transaction.as_ref() { - Some(transaction::Transaction::L1Handler(tx)) => { - self.contract_address.matches(&tx.contract_address) - && self.entry_point_selector.matches(&tx.entry_point_selector) - && self.calldata.prefix_matches(&tx.calldata) - } - _ => false, - } - } -} - -impl DeployAccountTransactionFilter { - pub fn matches(&self, tx: &Transaction) -> bool { - match tx.transaction.as_ref() { - Some(transaction::Transaction::DeployAccount(tx)) => { - self.class_hash.matches(&tx.class_hash) - && self - .contract_address_salt - .matches(&tx.contract_address_salt) - && self - .constructor_calldata - .prefix_matches(&tx.constructor_calldata) - } - _ => false, - } - } -} - -impl EventFilter { - pub fn matches(&self, event: &Event) -> bool { - self.from_address.matches(&event.from_address) - && self.keys.prefix_matches(&event.keys) - && self.data.prefix_matches(&event.data) - } -} - -impl L2ToL1MessageFilter { - pub fn matches(&self, message: &L2ToL1Message) -> bool { - self.to_address.matches(&message.to_address) - && self.payload.prefix_matches(&message.payload) - } -} - -impl StorageDiffFilter { - pub fn matches(&self, storage_diff: &StorageDiff) -> bool { - self.contract_address - .matches(&storage_diff.contract_address) - } -} - -impl DeclaredContractFilter { - pub fn matches(&self, declared_contract: 
&DeclaredContract) -> bool { - self.class_hash.matches(&declared_contract.class_hash) - } -} - -impl DeployedContractFilter { - pub fn matches(&self, deployed_contract: &DeployedContract) -> bool { - self.contract_address - .matches(&deployed_contract.contract_address) - && self.class_hash.matches(&deployed_contract.class_hash) - } -} - -impl DeclaredClassFilter { - pub fn matches(&self, declared_class: &DeclaredClass) -> bool { - self.class_hash.matches(&declared_class.class_hash) - && self - .compiled_class_hash - .matches(&declared_class.compiled_class_hash) - } -} - -impl ReplacedClassFilter { - pub fn matches(&self, replaced_class: &ReplacedClass) -> bool { - self.contract_address - .matches(&replaced_class.contract_address) - && self.class_hash.matches(&replaced_class.class_hash) - } -} - -impl NonceUpdateFilter { - pub fn matches(&self, nonce: &NonceUpdate) -> bool { - self.contract_address.matches(&nonce.contract_address) && self.nonce.matches(&nonce.nonce) - } -} - -impl FilterTrait for Filter { - fn merge_filter(&mut self, other: Self) { - if let Some(header) = self.header.as_mut() { - if let Some(other) = other.header { - header.merge(other); - } - } else { - self.header = other.header; - } - - self.events.extend(other.events); - self.transactions.extend(other.transactions); - self.messages.extend(other.messages); - - if let Some(state) = self.state_update.as_mut() { - if let Some(other) = other.state_update { - state.merge(other); - } - } else { - self.state_update = other.state_update; - } - } -} - -impl HeaderFilter { - fn merge(&mut self, other: Self) { - self.weak = self.weak && other.weak; - } -} - -impl StateUpdateFilter { - fn merge(&mut self, other: Self) { - self.storage_diffs.extend(other.storage_diffs); - self.declared_contracts.extend(other.declared_contracts); - self.deployed_contracts.extend(other.deployed_contracts); - self.nonces.extend(other.nonces); - } -} - -#[cfg(test)] -mod tests { - use super::{Filter, HeaderFilter}; - use 
crate::filter::Filter as FilterTrait; - - #[test] - fn test_merge_header() { - { - let mut a = Filter::default().with_header(HeaderFilter::weak()).build(); - let b = Filter::default().with_header(HeaderFilter::weak()).build(); - a.merge_filter(b); - assert!(a.header.unwrap().weak); - } - { - let mut a = Filter::default().with_header(HeaderFilter::new()).build(); - let b = Filter::default().with_header(HeaderFilter::weak()).build(); - a.merge_filter(b); - assert!(!a.header.unwrap().weak); - } - { - let mut a = Filter::default().with_header(HeaderFilter::weak()).build(); - let b = Filter::default().with_header(HeaderFilter::new()).build(); - a.merge_filter(b); - assert!(!a.header.unwrap().weak); - } - { - let mut a = Filter::default().with_header(HeaderFilter::new()).build(); - let b = Filter::default().with_header(HeaderFilter::new()).build(); - a.merge_filter(b); - assert!(!a.header.unwrap().weak); - } - } - - #[test] - fn test_merge_events() { - let mut a = Filter::default().add_event(|ev| ev).build(); - let b = Filter::default() - .add_event(|ev| ev) - .add_event(|ev| ev) - .build(); - a.merge_filter(b); - assert_eq!(a.events.len(), 3); - } - - #[test] - fn test_merge_transactions() { - let mut a = Filter::default().add_transaction(|tx| tx).build(); - let b = Filter::default() - .add_transaction(|tx| tx) - .add_transaction(|tx| tx) - .build(); - a.merge_filter(b); - assert_eq!(a.transactions.len(), 3); - } - - #[test] - fn test_merge_messages() { - let mut a = Filter::default().add_message(|msg| msg).build(); - let b = Filter::default() - .add_message(|msg| msg) - .add_message(|msg| msg) - .build(); - a.merge_filter(b); - assert_eq!(a.messages.len(), 3); - } -} diff --git a/core/src/starknet/mod.rs b/core/src/starknet/mod.rs deleted file mode 100644 index b949ea06..00000000 --- a/core/src/starknet/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod data; -mod filter; -mod proto; - -pub mod v1alpha2 { - pub use super::proto::v1alpha2::*; -} diff --git 
a/core/src/starknet/proto.rs b/core/src/starknet/proto.rs deleted file mode 100644 index 37317b2d..00000000 --- a/core/src/starknet/proto.rs +++ /dev/null @@ -1,28 +0,0 @@ -pub mod v1alpha2 { - tonic::include_proto!("apibara.starknet.v1alpha2"); - tonic::include_proto!("apibara.starknet.v1alpha2.serde"); - - pub(crate) const FILE_DESCRIPTOR_SET: &[u8] = - tonic::include_file_descriptor_set!("starknet_v1alpha2_descriptor"); - - pub fn starknet_file_descriptor_set() -> &'static [u8] { - FILE_DESCRIPTOR_SET - } -} - -#[cfg(test)] -mod tests { - use super::v1alpha2; - - #[test] - pub fn test_field_element_as_hex_string() { - let fe = v1alpha2::FieldElement::from_u64(0x1234567890abcdef); - let as_hex = serde_json::to_string(&fe).unwrap(); - assert_eq!( - as_hex, - r#""0x0000000000000000000000000000000000000000000000001234567890abcdef""# - ); - let back = serde_json::from_str::(&as_hex).unwrap(); - assert_eq!(fe, back); - } -} diff --git a/core/src/stream.rs b/core/src/stream.rs deleted file mode 100644 index da8b348f..00000000 --- a/core/src/stream.rs +++ /dev/null @@ -1,194 +0,0 @@ -use std::{fmt::Debug, marker::PhantomData, ops::Range}; - -/// Unique id for an input stream. -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct StreamId(u64); - -/// Stream message sequence number. -#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Hash, Eq)] -pub struct Sequence(u64); - -/// A range of sequence numbers. The range is non-inclusive. -#[derive(Debug, Clone, PartialEq, Hash, Eq)] -pub struct SequenceRange(Range); - -impl StreamId { - /// Create a `StreamId` from a `u64`. - pub fn from_u64(id: u64) -> StreamId { - StreamId(id) - } - - /// Returns the stream id as `u64`. - pub fn as_u64(&self) -> u64 { - self.0 - } - - /// Returns the stream id as bytes. - pub fn to_bytes(&self) -> [u8; 8] { - self.0.to_be_bytes() - } -} - -impl Sequence { - /// Create a `Sequence` from a `u64`. 
- pub fn from_u64(n: u64) -> Sequence { - Sequence(n) - } - - /// Returns the sequence number as `u64`. - pub fn as_u64(&self) -> u64 { - self.0 - } - - /// Returns true if the sequence number is 0. - pub fn is_zero(&self) -> bool { - self.0 == 0 - } - - /// Returns the sequence number immediately after. - pub fn successor(&self) -> Sequence { - Sequence(self.0 + 1) - } - - /// Returns the sequence number immediately before. - /// - /// Notice that this will panic if called on `Sequence(0)`. - pub fn predecessor(&self) -> Sequence { - Sequence(self.0 - 1) - } -} - -impl SequenceRange { - /// Creates a new sequence range. - pub fn new_from_u64(start_index: u64, end_index: u64) -> SequenceRange { - SequenceRange(start_index..end_index) - } - - /// Creates a new sequence range. - pub fn new(start_index: &Sequence, end_index: &Sequence) -> SequenceRange { - Self::new_from_u64(start_index.as_u64(), end_index.as_u64()) - } - - /// Returns the lower bound of the range, inclusive. - pub fn start(&self) -> Sequence { - Sequence::from_u64(self.0.start) - } - - /// Returns the upper bound of the range, exclusive. - pub fn end(&self) -> Sequence { - Sequence::from_u64(self.0.end) - } - - /// Returns true if the range contains no items. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - -impl Iterator for SequenceRange { - type Item = Sequence; - - fn size_hint(&self) -> (usize, Option) { - self.0.size_hint() - } - - fn next(&mut self) -> Option { - self.0.next().map(Sequence::from_u64) - } -} - -pub trait MessageData: prost::Message + Default + Clone {} - -impl MessageData for T where T: prost::Message + Default + Clone {} - -/// A [MessageData] that is never decoded. -/// -/// Use this in place of a [Vec] of bytes to not lose type safety. -#[derive(Debug, Clone)] -pub struct RawMessageData { - data: Vec, - _phantom: PhantomData, -} - -/// Message sent over the stream. 
-#[derive(Debug, Clone)] -pub enum StreamMessage { - Invalidate { - sequence: Sequence, - }, - Data { - sequence: Sequence, - data: RawMessageData, - }, - Pending { - sequence: Sequence, - data: RawMessageData, - }, -} - -impl StreamMessage -where - D: MessageData, -{ - /// Creates a new `Invalidate` message. - pub fn new_invalidate(sequence: Sequence) -> Self { - Self::Invalidate { sequence } - } - - /// Creates a new `Data` message. - pub fn new_data(sequence: Sequence, data: RawMessageData) -> Self { - Self::Data { sequence, data } - } - - /// Creates a new `Pending` message. - pub fn new_pending(sequence: Sequence, data: RawMessageData) -> Self { - Self::Pending { sequence, data } - } - - /// Returns the sequence number associated with the message. - pub fn sequence(&self) -> &Sequence { - match self { - Self::Invalidate { sequence } => sequence, - Self::Data { sequence, .. } => sequence, - Self::Pending { sequence, .. } => sequence, - } - } - - /// Returns true if it's a data message. - pub fn is_data(&self) -> bool { - matches!(self, Self::Data { .. }) - } - - /// Returns true if it's an invalidate message. - pub fn is_invalidate(&self) -> bool { - matches!(self, Self::Invalidate { .. }) - } - - /// Returns true if it's a pending message. - pub fn is_pending(&self) -> bool { - matches!(self, Self::Pending { .. }) - } -} - -impl RawMessageData -where - D: MessageData, -{ - /// Creates a new [RawMessageData] from [Vec]. - pub fn from_vec(data: Vec) -> Self { - RawMessageData { - data, - _phantom: PhantomData, - } - } - - /// Returns the bytes content of the message. - pub fn as_bytes(&self) -> &[u8] { - self.data.as_ref() - } - - /// Decodes the raw message to a [prost::Message]. 
- pub fn to_proto(&self) -> Result { - D::decode(self.data.as_ref()) - } -} diff --git a/docs/integrations/console.mdx b/docs/integrations/console.mdx deleted file mode 100644 index bc5d0c7d..00000000 --- a/docs/integrations/console.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Console Integration -titleShort: Console -description: "Easily debug Apibara scripts using the console sink." -priority: 999 -updatedAt: 2023-08-15 11:00 ---- - -# Console integration - -This integration prints the result of the transform function to standard -output. Use this integration to debug your transformations without having -to setup a connection with the target integration. - - -### Installation - -``` -apibara plugins install sink-console -``` - -### Usage - -Simply set the value of `sinkType` to `console`. - -```ts -export cost config = { - // your config here... - sinkType: "console", - sinkOptions: { - } -} -``` diff --git a/docs/integrations/index.mdx b/docs/integrations/index.mdx deleted file mode 100644 index e7fa7d88..00000000 --- a/docs/integrations/index.mdx +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Apibara Integrations -titleShort: Overview -description: "Build production-grade indexers using Apibara." -priority: 1000 -updatedAt: 2023-08-15 10:00 ---- - -# Apibara integrations - -From our own and our users experience building indexers, we noticed common -patterns when it comes to integrate onchain data with the services that power -modern applications. - -Apibara provides built-in _integrations_ that simplify integrating onchain data -with common services such as web API, databases, and data analytics tools. - - -## Integrations concepts - -All integrations follow these three steps: - - - stream data from a DNA stream by using the user-provided **filter**. - - optionally, each batch of data is **transformed** by running a - JavaScript/Typescript script. This step is powered by Deno, a lightweight - and powerful JavaScript runtime. 
- - finally, data is sent to the downstream integration. This step looks - different based on the type of integration. - -Notice that all integrations support starting from historical data and then -continue with live (real-time) blocks. This means you can use Apibara to build -both your indexer (which requires historical data), but also notification -services that require the most recent real-time data. - - -## Types of integrations - -Apibara goal is to bring onchain data to any application. At the moment, we -offer three types of integrations: - - -### Web API - - - **serverless functions**: invoke serverless functions for each batch of - data, both historical and live data. Functions are also invoked when a chain - reorganization happens, so that your application can manage them. - - **webhooks**: invoke a webhook for each batch of data, with exactly the payload - you provide. This integration doesn't invoke the HTTP webhook in case of - chain reorganization. - - -### Database mirroring - -Apibara can mirror all onchain data you select to a database of your choice. -This is the easiest and fastest way to build an indexer for your application, -all data is synced automatically and you can focus on other important parts -of your application. - -While some details vary between each database implementation, all database -integrations work as follows: - - - the records returned by the _transform step_ are inserted in the database. - This step is required to return an array of objects. - - Apibara adds the cursor that generated each piece of data. - - When a chain reorganization happens, Apibara removes all records that have - been invalidated. - -We provide integrations for the following two databases: - - - **PostgreSQL**: write data to the _table_ specified by the user. Batch data - is converted to PostgreSQL records using the `json_populate_recordset` - function. 
Apibara requires a `_cursor` column in the table to keep track of - each batch's cursor, so that data can be invalidated in case of chain - reorganizations. - - **MongoDB**: write data to the _collection_ specified by the user. Data is - converted to BSON and then written to the collection. Apibara adds a - `_cursor` column to each record so that data can be invalidated in case of - chain reorganizations. - -If you'd like us to add a specific database, feel free to [open an issue on -GitHub](https://github.com/apibara/dna/issues). - - -### Dataset generation - -Apibara is the easiest and fastest way to generate all the datasets that your -analytics team needs. Generate exactly the data you need using the _filter_ and -_transform_ steps. After that, the integration will start streaming all -historical data and will keep your dataset updated as the chain moves forward. - -At the moment, datasets can only be generated locally. In the future, we plan -to add the ability to automatically upload the datasets to any S3-compatible -storage. - -Apibara supports generating datasets in the following formats: - - - **Apache Parquet**: generate Parquet files, the schema is automatically - deduced from the first batch of data. Apibara groups multiple blocks of data - into a single file. - diff --git a/docs/integrations/mongo.mdx b/docs/integrations/mongo.mdx deleted file mode 100644 index ff8fbe13..00000000 --- a/docs/integrations/mongo.mdx +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: MongoDB Integration -titleShort: MongoDB -description: "Sync onchain data to your MongoDB database using Apibara." -priority: 699 -updatedAt: 2023-08-29 15:00 ---- - -# MongoDB integration - -The MongoDB integration provides a way to mirror onchain data to a MongoDB -collection of your choice. Data is automatically inserted as it's produced by -the chain, and it's invalidated in case of chain reorganizations. 
- - - The integration can be used to **populate a collection with data from one or - more networks or smart contracts**. - - Create powerful analytics with MongoDB pipelines. - - Change how collections are queried without re-indexing. - - -### Installation - -``` -apibara plugins install sink-mongo -``` - - -### Configuration - - - `connectionString: string`: the Mongo connection URL of your database. - - `database: string`: the target database name. - - `collectionName: string`: the target collection name. - - `collectionNames: [string]`: a list of target collection names. See the "Multiple Collections" section for more information. - - `entityMode: boolean`: enable entity mode. See the "Entity - storage" section for more information. - - -### Collection schema - -The transformation step is required to return an array of objects. Data is -converted to BSON and then written to the collection. The MongoDB integration -adds a `_cursor` column to each record so that data can be invalidated in case -of chain reorganizations. - - -### Querying data - -When querying data, you should always add the following property to your MongoDB filter -to ensure you get the latest value: - -```ts -{ - "_cursor.to": null, -} -``` - -The "Storage & Data Invalidation" section at the end of this document contains -information on why you need to add this condition to your filter. - - -### Entity storage - -The MongoDB integration works with two types of data: - - - Immutable logs (default): the values returned by the indexer represent - something that doesn't change over time, in other words they're a list of - items. For example, they represent a list of token transfers. - - Mutable entities: the values returned by the indexer represent the state of - an entity at a given block. For example, token balances change block by - block as users transfer the token. - -You can index entities by setting the `entityMode` option to true. 
When you set -this option, the indexer expects the transform function to return a list of -update operations. An update operation is a JavaScript object with an `entity` -property used to filter which entities should be updated, and an `update` property -with either an [update -document](https://www.mongodb.com/docs/manual/reference/method/db.collection.updateMany/#std-label-updateMany-behavior-update-expressions) -or an [aggregation -pipeline](https://www.mongodb.com/docs/manual/reference/method/db.collection.updateMany/#std-label-updateMany-behavior-aggregation-pipeline). - - -**Example**: our indexer tracks token ownership for an ERC-721 smart contract -together with the number of transactions for each token. -We enable entity storage by setting the `entityMode` option to `true`. -The transform function returns the entities that need update, together with the -operation to update their state. - -```ts -export default function transform(block: Block) { - // Example to show the shape of data returned by transform. - return [ - { - entity: { contract, tokenId: "1" }, - update: { "$set": { owner: "0xA" }, "$inc": { "txCount": 1 } } - }, - { - entity: { contract, tokenId: "2" }, - update: { "$set": { owner: "0xB" }, "$inc": { "txCount": 3 } } - }, - ]; -} -``` - -The integration will iterate through the new entities and update the existing -values (if any) using the following MongoDB pseudo-query: - -```ts -for (const doc of returnValue) { - db.collection.updateMany({ - filter: doc.entity, - update: doc.update, - options: { - upsert: true, - } - }); -} -``` - -Notice that in reality the query is more complex, please refer to the next -section to learn more about how the MongoDB integration stores data. 
- -### Multiple Collections - -You can write to multiple collections at the same time using the -`collectionNames` option, doing so the transform function should specify what -collection to the write the data to: -- For standard mode, put the returned data in a `data` key and add a - `collection` key, the returned value will look like `{ data: any, collection: - string }` -- For entity mode, just add a `collection` key to the returned object, it - should look like `{ entity: any, collection: string, update: any }` - -### Storage & Data Invalidation - -Storing blockchain data poses an additional challenge since we must be able to -rollback the database state in case of chain reorganizations. -This integration adds an additional `_cursor` field to all documents to track -for which block range a piece of data is valid for. - -```ts -type Cursor = { - /** Block (inclusive) when this piece of data was created. */ - from: number, - /** Block (exclusive) at which this piece of data became invalid. */ - to: number | null, -}; -``` - -It follows that a field is valid at the most recent block if its `_cursor.to` -field is `null`. - -**Example**: we're indexing an ERC-721 token with the following transfers: - - - block: 1000, transfer from 0x0 to 0xA - - block: 1010, transfer from 0xA to 0xB - - block: 1020, transfer from 0xB to 0xC - -If we put the token ownership on a timeline, it looks like the following diagram. - -```txt -1000 1010 1020 ---+-----------------------+---------------------+---- - - - - - - - - [ { owner: "0xA } ) - [ { owner: "0xB" } ) - [ { owner: "0xC" } -``` - -Which translates to the following documents in the MongoDB collection. 
- -After the first transfer: - -```json -[ - { "owner": "0xA", "_cursor": { "from": 1000, "to": null } } -] -``` - -After the second transfer: - -```json -[ - { "owner": "0xA", "_cursor": { "from": 1000, "to": 1010 } }, - { "owner": "0xB", "_cursor": { "from": 1010, "to": null } } -] -``` - -And after the third transfer: - -```json -[ - { "owner": "0xA", "_cursor": { "from": 1000, "to": 1010 } }, - { "owner": "0xB", "_cursor": { "from": 1010, "to": 1020 } }, - { "owner": "0xC", "_cursor": { "from": 1020, "to": null } } -] -``` - diff --git a/docs/integrations/parquet.mdx b/docs/integrations/parquet.mdx deleted file mode 100644 index 2e3f0bda..00000000 --- a/docs/integrations/parquet.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Parquet Integration -titleShort: Parquet -description: "Create Parquet datasets of onchain data using Apibara." -priority: 698 -updatedAt: 2023-08-15 11:00 ---- - -# Parquet integration - -**Notice**: the Parquet integration is still in the early stage and subject to -change. - -The Parquet integration generates datasets containing onchain data. -Parquet is the standard file format used by analytics applications such as: - - - The Pandas and Polars data frame libraries for Python. - - DuckDB, an in-process SQL Online Analytical Processing (OLAP) database. - - -### Installation - -``` -apibara plugins install sink-parquet -``` - - -### Configuration - - - `outputDir: string`: write the Parquet files to this directory. - - `batchSize: string`: each Parquet file has data for the specified - number of blocks. - diff --git a/docs/integrations/postgres.mdx b/docs/integrations/postgres.mdx deleted file mode 100644 index b13b87b6..00000000 --- a/docs/integrations/postgres.mdx +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: PostgreSQL Integration -titleShort: PostgreSQL -description: "Sync onchain data to your PostgreSQL database using Apibara." 
-priority: 700 -updatedAt: 2023-08-15 11:00 ---- - -# PostgreSQL integration - -The PostgreSQL integration is used to mirror onchain data to a PostgreSQL -database. Data is automatically inserted as it's produced by the chain, -and it's invalidated in case of chain reorganizations. - - - This integration can be used to **populate SQL tables with data from one or - more networks and smart contracts**. - - Easily integrate with AI libraries such as Langchain. - - Change how tables are joined or queried without re-indexing. - -### Installation - -``` -apibara plugins install sink-postgres -``` - - -### Configuration - -**General** - - - `connectionString: string`: URL used to connect to your PostgreSQL database. - - `tableName: string`: table where data will be inserted. The table must exist and - it must have a schema compatible with the data returned by the transform - step. - - `invalidate: { column: string, value: string }[]`: additional conditions - used when invalidating data. You should use this option when running multiple - indexers writing data to the same table. - - `entityMode: boolean`: enable entity mode. See the next section to learn - more about entity mode. - -**TLS** - - - `noTls: boolean`: disable TLS when connecting to the server. - - `tlsCertificate: string`: path to the PEM-formatted X509 TLS certificate. - - `tlsDisableSystemRoots: boolean`: disable system root certificates. - - `tlsAcceptInvalidCertificates: boolean`: accept invalid TLS certificates. - - `tlsAcceptInvalidHostnames: boolean`: disable hostname validation. - - `tlsUseSni: boolean`: use Server Name Identification (SNI). - -### Table schema - -The target table schema must be compatible with the data returned by the -transformation step. -Batch data is converted to PostgreSQL records using the `json_populate_recordset` -function. 
Additionally, the PostgreSQL integration **requires a `_cursor` -column** in the table to keep track of each batch's cursor, so that data can be -invalidated in case of chain reorganizations. The type of `_cursor` is different -between standard and entity mode. - -### Standard mode - -In standard mode (default), the data returned by the transform function is -inserted into the target table. -You should use standard mode if you're storing "a list of items," for example -all transfer events of a token or the list of _position changes_ for your smart -contract. - -In standard mode, the type of `_cursor` is `bigint`. - -### Entity mode - -Entity mode is used to store entities in the table. Entities represent the -state of an object at a specific point in time. - -To enable entity mode, set the `entityMode` option to `true`. When using -entity mode, the type of `_cursor` is `int8range` since the integration -must track for which blocks an entity state is valid. - -In entity mode, the behaviour of the transform function changes. The transform -function must return a list of operations: - - - `{ insert: data }`: insert new data into the table. - - `{ update: data, entity: key }`: update the entity uniquely identified by - `key` with the new `data`. - -**Querying data** - -To query the latest state for an entity, add the `upper_inf(_cursor)` condition -to the `WHERE` clause. - -```sql -SELECT * FROM balances WHERE upper_inf(_cursor); -``` - -You can query for the state at a specific block using the `<@` operator. - -```sql -SELECT * FROM balances WHERE 9123456::bigint <@ _cursor -``` - -**Example** - -Consider a smart contract for a game that emits a new `GameStarted(game_id)` -event when a player starts a new game, and `GameEnded(game_id, score)` when -the game ends. 
- -```ts -export default function transform({ header, events }) { - const { timestamp } = header; - return events.flatMap(({ event }) => { - if (isGameStarted(event)) { - const { game_id } = decodeEvent(event); - return { - insert: { - game_id, - status: "STARTED", - created_at: timestamp, - }, - }; - } else if (isGameEnded(event)) { - const { game_id, score } = decodeEvent(event); - return { - entity: { - game_id, - }, - update: { - score, - status: "ENDED", - updated_at: timestamp, - }, - }; - } else { - return []; - } - }); -} -``` - -### Provider-specific setup - -#### Supabase - -You have two options: - - - disable TLS by adding the `--no-tls=true` flag when running your indexer. - **This isn't recommended for production**. - - download the SSL certificate from your Supabase dashboard (Settings => - Database) and convert it to PEM. - -After downloading the `.crt` certificate from your dashboard, you will have a -`.crt` file in your download folder. This file will be named something like -`prod-ca-2021.pem`. Convert it to PEM using, for example, the `openssl` CLI tool. - -```bash -openssl x509 -in prod-ca-2021.crt -out prod-ca-2021.pem -outform PEM -``` - -Use the `--tls-certificate` (or `sinkOptions.tlsCertificate` in your -script) flag to point to the PEM certificate path. - -#### Neon - -Use the provided connection string. - diff --git a/docs/integrations/webhook.mdx b/docs/integrations/webhook.mdx deleted file mode 100644 index 9f6d4958..00000000 --- a/docs/integrations/webhook.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Webhooks & serverless Functions Integrations -titleShort: Webhook -description: "Invoke a webhook or serverless function using Apibara." -priority: 800 -updatedAt: 2023-08-15 10:00 ---- - -# Webhooks & serverless Functions Integrations - -The webhook integration provides a way to call a predefined HTTP endpoint for -each new batch of data produced by a DNA stream. 
- -This integration is well suited for the following tasks: - - - **build an indexer** using serverless technologies. You can invoke serverless - functions such as, but not limited to, AWS Lambda, Supabase Functions, - Cloudflare Functions, and Vercel Functions. - - **send notifications** using the Discord or Telegram API. - -### Installation - -``` -apibara plugins install sink-webhook -``` - -### Configuration - - - `targetUrl: string`: URL where the stream data will be posted to. - - `raw: boolean`: if set to `true`, the payload will be the data returned by - the transform function. Otherwise, wrap the data in JSON-object together - with its cursors. diff --git a/etcd-dbg/Cargo.toml b/etcd-dbg/Cargo.toml new file mode 100644 index 00000000..aebf84b2 --- /dev/null +++ b/etcd-dbg/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "apibara-etcd-dbg" +version = "0.1.0" +edition.workspace = true +authors.workspace = true +repository.workspace = true +license.workspace = true + +[dependencies] +apibara-etcd = { path = "../etcd" } +apibara-observability = { path = "../observability" } +clap.workspace = true +error-stack.workspace = true +tokio.workspace = true +tokio-stream.workspace = true +tokio-util.workspace = true +tracing.workspace = true diff --git a/etcd-dbg/src/main.rs b/etcd-dbg/src/main.rs new file mode 100644 index 00000000..7dfb054a --- /dev/null +++ b/etcd-dbg/src/main.rs @@ -0,0 +1,103 @@ +use std::time::Duration; + +use apibara_etcd::{EtcdClient, EtcdClientOptions, LockOptions}; +use apibara_observability::init_opentelemetry; +use clap::{Parser, Subcommand}; +use error_stack::{Result, ResultExt}; +use tokio_util::sync::CancellationToken; +use tracing::info; + +#[derive(Debug)] +struct CliError; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +pub struct Cli { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +pub enum Command { + /// ETCD distributed lock. 
+ Lock { + #[arg(long)] + key: String, + #[arg(long, value_delimiter = ',', default_value = "http://localhost:2379")] + endpoints: Vec, + #[arg(long)] + ttl: Option, + #[arg(long)] + prefix: Option, + }, +} + +#[tokio::main] +async fn main() -> Result<(), CliError> { + init_opentelemetry(env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")) + .change_context(CliError) + .attach_printable("failed to initialize opentelemetry")?; + + let args = Cli::parse(); + + match args.command { + Command::Lock { + key, + endpoints, + ttl, + prefix, + } => { + let ct = CancellationToken::new(); + let options = EtcdClientOptions { prefix, auth: None }; + let client = EtcdClient::connect(endpoints, options) + .await + .change_context(CliError) + .attach_printable("failed to connect to etcd")?; + + let lock_options = LockOptions { + ttl: ttl.unwrap_or(60), + }; + + let mut lock_client = client.lock_client(lock_options); + + while !ct.is_cancelled() { + info!("acquiring lock"); + + let Some(mut lock) = lock_client + .lock(key.clone(), ct.clone()) + .await + .change_context(CliError)? + else { + break; + }; + + info!("acquired lock"); + + loop { + tokio::time::sleep(Duration::from_secs(1)).await; + if lock_client + .is_locked(&lock) + .await + .change_context(CliError)? 
+ { + info!("lock is still held"); + lock.keep_alive().await.change_context(CliError)?; + } else { + info!("lock is no longer held"); + break; + } + } + } + } + } + + Ok(()) +} + +impl error_stack::Context for CliError {} + +impl std::fmt::Display for CliError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "cli error") + } +} diff --git a/runners/runner-common/Cargo.toml b/etcd/Cargo.toml similarity index 53% rename from runners/runner-common/Cargo.toml rename to etcd/Cargo.toml index 00b00f79..e1fabd51 100644 --- a/runners/runner-common/Cargo.toml +++ b/etcd/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "apibara-runner-common" +name = "apibara-etcd" version = "0.1.0" edition.workspace = true authors.workspace = true @@ -7,9 +7,9 @@ repository.workspace = true license.workspace = true [dependencies] -prost.workspace = true -tonic.workspace = true error-stack.workspace = true - -[build-dependencies] -tonic-build.workspace = true +etcd-client.workspace = true +futures.workspace = true +tracing.workspace = true +tokio.workspace = true +tokio-util.workspace = true diff --git a/etcd/src/client.rs b/etcd/src/client.rs new file mode 100644 index 00000000..e420b2ca --- /dev/null +++ b/etcd/src/client.rs @@ -0,0 +1,90 @@ +use error_stack::{Result, ResultExt}; + +use crate::{lock::LockOptions, utils::normalize_prefix, watch::WatchClient}; + +pub use etcd_client::StatusResponse; + +use crate::{kv::KvClient, lock::LockClient}; + +#[derive(Debug)] +pub struct EtcdClientError; + +#[derive(Debug, Default)] +pub struct AuthOptions { + pub user: String, + pub password: String, +} + +#[derive(Debug, Default)] +pub struct EtcdClientOptions { + pub prefix: Option, + pub auth: Option, +} + +#[derive(Clone)] +pub struct EtcdClient { + pub(crate) client: etcd_client::Client, + prefix: String, +} + +impl EtcdClient { + pub async fn connect, S: AsRef<[E]>>( + endpoints: S, + options: EtcdClientOptions, + ) -> Result { + let connect_options = if let 
Some(auth) = options.auth { + etcd_client::ConnectOptions::new() + .with_user(auth.user, auth.password) + .into() + } else { + None + }; + + let client = etcd_client::Client::connect(endpoints, connect_options) + .await + .change_context(EtcdClientError) + .attach_printable("failed to connect to etcd")?; + + let prefix = normalize_prefix(options.prefix); + + Ok(Self { client, prefix }) + } + + pub async fn status(&mut self) -> Result { + self.client + .status() + .await + .change_context(EtcdClientError) + .attach_printable("failed to get etcd status") + } + + pub fn kv_client(&self) -> KvClient { + KvClient { + client: self.client.kv_client(), + prefix: self.prefix.clone(), + } + } + + pub fn watch_client(&self) -> WatchClient { + WatchClient { + client: self.client.watch_client(), + prefix: self.prefix.clone(), + } + } + + pub fn lock_client(&self, options: LockOptions) -> LockClient { + LockClient { + client: self.client.clone(), + prefix: self.prefix.clone(), + options, + } + } +} + +impl error_stack::Context for EtcdClientError {} + +impl std::fmt::Display for EtcdClientError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "etcd client error") + } +} diff --git a/etcd/src/kv.rs b/etcd/src/kv.rs new file mode 100644 index 00000000..0a580411 --- /dev/null +++ b/etcd/src/kv.rs @@ -0,0 +1,60 @@ +use error_stack::{Result, ResultExt}; + +use etcd_client::GetOptions; +pub use etcd_client::{GetResponse, PutResponse}; + +use crate::client::EtcdClientError; + +#[derive(Clone)] +pub struct KvClient { + pub(crate) client: etcd_client::KvClient, + pub(crate) prefix: String, +} + +impl KvClient { + #[tracing::instrument(level = "debug", skip_all, fields(key = prefix.as_ref()))] + pub async fn get_prefix( + &mut self, + prefix: impl AsRef, + ) -> Result { + let prefix = prefix.as_ref(); + let options = GetOptions::new().with_prefix(); + self.client + .get(self.format_key(prefix), options.into()) + .await + .change_context(EtcdClientError) 
+ .attach_printable("failed to get key with prefix from etcd") + .attach_printable_lazy(|| format!("prefix: {}", prefix)) + } + + #[tracing::instrument(level = "debug", skip_all, fields(key = key.as_ref()))] + pub async fn get(&mut self, key: impl AsRef) -> Result { + let key = key.as_ref(); + self.client + .get(self.format_key(key), None) + .await + .change_context(EtcdClientError) + .attach_printable("failed to get key from etcd") + .attach_printable_lazy(|| format!("key: {}", key)) + } + + #[tracing::instrument(level = "debug", skip_all, fields(key = key.as_ref()))] + pub async fn put( + &mut self, + key: impl AsRef, + value: impl AsRef<[u8]>, + ) -> Result { + let key = key.as_ref(); + let value = value.as_ref(); + self.client + .put(self.format_key(key), value, None) + .await + .change_context(EtcdClientError) + .attach_printable("failed to put key to etcd") + .attach_printable_lazy(|| format!("key: {}", key)) + } + + fn format_key(&self, key: &str) -> Vec { + format!("{}{}", self.prefix, key).into_bytes() + } +} diff --git a/etcd/src/lib.rs b/etcd/src/lib.rs new file mode 100644 index 00000000..e5ae50b4 --- /dev/null +++ b/etcd/src/lib.rs @@ -0,0 +1,16 @@ +//! A collection of utilities for working with etcd. 
+mod client; +mod kv; +mod lock; +mod utils; +mod watch; + +pub use self::client::{ + AuthOptions, EtcdClient, EtcdClientError, EtcdClientOptions, StatusResponse, +}; +pub use self::kv::{GetResponse, KvClient, PutResponse}; +pub use self::lock::{Lock, LockClient, LockOptions}; +pub use self::utils::normalize_prefix; +pub use self::watch::WatchClient; + +pub use etcd_client::LeaseKeepAliveStream; diff --git a/etcd/src/lock.rs b/etcd/src/lock.rs new file mode 100644 index 00000000..6f9f9522 --- /dev/null +++ b/etcd/src/lock.rs @@ -0,0 +1,142 @@ +use std::time::{Duration, Instant}; + +use error_stack::{Result, ResultExt}; +use etcd_client::{LeaseKeeper, LockResponse}; +use tokio_util::sync::CancellationToken; +use tracing::debug; + +use crate::EtcdClientError; + +#[derive(Debug)] +pub struct LockOptions { + pub ttl: i64, +} + +pub struct LockClient { + pub(crate) client: etcd_client::Client, + pub(crate) prefix: String, + pub(crate) options: LockOptions, +} + +pub struct Lock { + inner: LockResponse, + keeper: LeaseKeeper, + lease_id: i64, + min_refresh_interval: Duration, + last_refresh: Instant, +} + +impl LockClient { + #[tracing::instrument(level = "debug", skip_all)] + pub async fn lock( + &mut self, + key: impl AsRef, + ct: CancellationToken, + ) -> Result, EtcdClientError> { + let key = key.as_ref(); + + let lease = self + .client + .lease_grant(self.options.ttl, None) + .await + .change_context(EtcdClientError) + .attach_printable("failed to acquire lock")?; + + let lease_id = lease.id(); + + let (mut keep_alive, _) = self + .client + .lease_keep_alive(lease_id) + .await + .change_context(EtcdClientError) + .attach_printable("failed to keep lease alive")?; + + let options = etcd_client::LockOptions::new().with_lease(lease_id); + + let min_refresh_interval = Duration::from_secs(self.options.ttl as u64 / 2); + + let mut refresh_interval = tokio::time::interval(min_refresh_interval); + + let lock_fut = self.client.lock(self.format_key(key), options.into()); + 
tokio::pin!(lock_fut); + + // Keep refreshing the lease while waiting for the lock. + loop { + tokio::select! { + _ = ct.cancelled() => { + return Ok(None); + } + _ = refresh_interval.tick() => { + keep_alive.keep_alive().await.change_context(EtcdClientError)?; + } + response = &mut lock_fut => { + let inner = response.change_context(EtcdClientError) + .attach_printable("failed to lock key") + .attach_printable_lazy(|| format!("key: {}", key))?; + + let lock = Lock { + inner, + keeper: keep_alive, + lease_id, + min_refresh_interval, + last_refresh: Instant::now(), + }; + + return Ok(lock.into()) + } + } + } + } + + #[tracing::instrument(level = "debug", skip_all)] + pub async fn unlock(&mut self, lock: Lock) -> Result<(), EtcdClientError> { + let lock = lock.inner; + let key = lock.key().to_vec(); + self.client + .unlock(key) + .await + .change_context(EtcdClientError) + .attach_printable("failed to unlock lock")?; + + Ok(()) + } + + #[tracing::instrument(level = "debug", skip_all)] + pub async fn is_locked(&mut self, lock: &Lock) -> Result { + let response = self + .client + .lease_time_to_live(lock.lease_id, None) + .await + .change_context(EtcdClientError) + .attach_printable("failed to get lease time to live") + .attach_printable_lazy(|| format!("lease id: {}", lock.lease_id))?; + Ok(response.ttl() > 0) + } + + fn format_key(&self, key: &str) -> Vec { + format!("{}{}", self.prefix, key).into_bytes() + } +} + +impl Lock { + pub async fn keep_alive(&mut self) -> Result<(), EtcdClientError> { + if self.last_refresh.elapsed() <= self.min_refresh_interval { + return Ok(()); + } + + debug!(lease_id = %self.lease_id, "send keep alive message"); + self.keeper + .keep_alive() + .await + .change_context(EtcdClientError) + .attach_printable("failed to keep lease alive")?; + self.last_refresh = Instant::now(); + + Ok(()) + } +} +impl Default for LockOptions { + fn default() -> Self { + Self { ttl: 60 } + } +} diff --git a/etcd/src/utils.rs b/etcd/src/utils.rs new file mode 
100644 index 00000000..30f9634c --- /dev/null +++ b/etcd/src/utils.rs @@ -0,0 +1,8 @@ +pub fn normalize_prefix(prefix: Option) -> String { + let prefix = prefix.unwrap_or_default(); + if prefix.ends_with("/") || prefix.is_empty() { + prefix + } else { + format!("{}/", prefix) + } +} diff --git a/etcd/src/watch.rs b/etcd/src/watch.rs new file mode 100644 index 00000000..e512af53 --- /dev/null +++ b/etcd/src/watch.rs @@ -0,0 +1,49 @@ +use error_stack::{Result, ResultExt}; +use etcd_client::WatchOptions; +use futures::{Stream, StreamExt}; +use tokio_util::sync::CancellationToken; + +use crate::client::EtcdClientError; + +pub use etcd_client::{WatchResponse, Watcher}; + +#[derive(Clone)] +pub struct WatchClient { + pub(crate) client: etcd_client::WatchClient, + pub(crate) prefix: String, +} + +impl WatchClient { + pub async fn watch_prefix( + &mut self, + key: impl AsRef, + ct: CancellationToken, + ) -> Result< + ( + Watcher, + impl Stream>, + ), + EtcdClientError, + > { + let key = key.as_ref(); + let options = WatchOptions::new().with_prefix(); + + let (watcher, stream) = self + .client + .watch(self.format_key(key), options.into()) + .await + .change_context(EtcdClientError) + .attach_printable("failed to watch key with prefix from etcd") + .attach_printable_lazy(|| format!("prefix: {}", key))?; + + let stream = stream + .map(|res| res.change_context(EtcdClientError)) + .take_until(async move { ct.cancelled().await }); + + Ok((watcher, stream)) + } + + fn format_key(&self, key: &str) -> Vec { + format!("{}{}", self.prefix, key).into_bytes() + } +} diff --git a/evm/Cargo.toml b/evm/Cargo.toml new file mode 100644 index 00000000..7bb12bc8 --- /dev/null +++ b/evm/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "apibara-dna-evm" +version = "0.0.0" +edition.workspace = true +authors.workspace = true +repository.workspace = true +license.workspace = true + +[lib] +name = "apibara_dna_evm" +path = "src/lib.rs" + +[[bin]] +name = "apibara-dna-evm" +path = "src/bin.rs" + 
+[dependencies] +alloy-rpc-client = "0.3.6" +alloy-provider = "0.3.6" +alloy-primitives.workspace = true +alloy-rpc-types = "0.3.6" +alloy-transport = "0.3.6" +apibara-observability = { path = "../observability" } +apibara-dna-common = { path = "../common" } +apibara-dna-protocol = { path = "../protocol" } +byte-unit.workspace = true +clap.workspace = true +ctrlc.workspace = true +error-stack.workspace = true +futures.workspace = true +hex.workspace = true +mimalloc.workspace = true +prost.workspace = true +prost-types.workspace = true +reqwest.workspace = true +roaring.workspace = true +rkyv.workspace = true +serde.workspace = true +serde_json.workspace = true +serde_with.workspace = true +tonic.workspace = true +tokio.workspace = true +tokio-util.workspace = true +tracing.workspace = true +url.workspace = true diff --git a/evm/src/bin.rs b/evm/src/bin.rs new file mode 100644 index 00000000..f1a025da --- /dev/null +++ b/evm/src/bin.rs @@ -0,0 +1,36 @@ +use apibara_dna_evm::{cli::Cli, error::EvmError}; +use apibara_observability::init_opentelemetry; +use clap::Parser; +use error_stack::{Result, ResultExt}; +use mimalloc::MiMalloc; +use tokio_util::sync::CancellationToken; +use tracing::info; + +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; + +#[tokio::main] +async fn main() -> Result<(), EvmError> { + let args = Cli::parse(); + run_with_args(args).await +} + +async fn run_with_args(args: Cli) -> Result<(), EvmError> { + init_opentelemetry(env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")) + .change_context(EvmError) + .attach_printable("failed to initialize opentelemetry")?; + + let ct = CancellationToken::new(); + + ctrlc::set_handler({ + let ct = ct.clone(); + move || { + info!("SIGINT received"); + ct.cancel(); + } + }) + .change_context(EvmError) + .attach_printable("failed to set SIGINT handler")?; + + args.run(ct).await +} diff --git a/evm/src/cli/dbg.rs b/evm/src/cli/dbg.rs new file mode 100644 index 00000000..25c483e9 --- /dev/null +++ 
b/evm/src/cli/dbg.rs @@ -0,0 +1,91 @@ +use alloy_primitives::hex::FromHex; +use clap::Subcommand; +use error_stack::{Result, ResultExt}; +use tracing::info; + +use crate::{ + cli::rpc::RpcArgs, + error::EvmError, + provider::{models, BlockId, JsonRpcProvider}, +}; + +#[derive(Subcommand, Debug)] +#[allow(clippy::enum_variant_names)] +pub enum DebugRpcCommand { + /// Get a block with full transactions. + GetBlockWithTransactions { + #[clap(flatten)] + rpc: RpcArgs, + #[arg(long, env, default_value = "head")] + block_id: String, + }, + /// Get a block with its receipts. + GetBlockReceipts { + #[clap(flatten)] + rpc: RpcArgs, + #[arg(long, env, default_value = "head")] + block_id: String, + }, +} + +impl DebugRpcCommand { + pub async fn run(self) -> Result<(), EvmError> { + let rpc_provider = self.rpc_provider()?; + let block_id = self.block_id()?; + + match self { + DebugRpcCommand::GetBlockWithTransactions { .. } => { + info!(block_id = ?block_id, "getting block with transactions"); + let block_with_transactions = rpc_provider + .get_block_with_transactions(block_id) + .await + .change_context(EvmError)?; + + println!("{:#?}", block_with_transactions); + + Ok(()) + } + DebugRpcCommand::GetBlockReceipts { .. } => { + info!(block_id = ?block_id, "getting block receipts"); + let block_receipts = rpc_provider + .get_block_receipts(block_id) + .await + .change_context(EvmError)?; + + println!("{:#?}", block_receipts); + + Ok(()) + } + } + } + + fn rpc_provider(&self) -> Result { + match self { + DebugRpcCommand::GetBlockWithTransactions { rpc, .. } => rpc.to_json_rpc_provider(), + DebugRpcCommand::GetBlockReceipts { rpc, .. } => rpc.to_json_rpc_provider(), + } + } + + fn block_id(&self) -> Result { + let block_id = match self { + DebugRpcCommand::GetBlockWithTransactions { block_id, .. } => block_id, + DebugRpcCommand::GetBlockReceipts { block_id, .. 
} => block_id, + }; + + match block_id.as_str() { + "head" => Ok(BlockId::latest()), + "finalized" => Ok(BlockId::finalized()), + str_value => { + if let Ok(number) = str_value.parse::() { + return Ok(BlockId::Number(number.into())); + } + if let Ok(hash) = models::B256::from_hex(str_value) { + return Ok(BlockId::Hash(hash.into())); + } + Err(EvmError) + .attach_printable("invalid block id") + .attach_printable_lazy(|| format!("block id: {}", block_id)) + } + } + } +} diff --git a/evm/src/cli/mod.rs b/evm/src/cli/mod.rs new file mode 100644 index 00000000..3b6f6906 --- /dev/null +++ b/evm/src/cli/mod.rs @@ -0,0 +1,47 @@ +mod dbg; +mod rpc; +mod start; + +use apibara_dna_common::dbg::DebugIndexCommand; +use clap::{Parser, Subcommand}; +use error_stack::{Result, ResultExt}; +use tokio_util::sync::CancellationToken; + +use crate::error::EvmError; + +use self::{dbg::DebugRpcCommand, start::StartCommand}; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +pub struct Cli { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +pub enum Command { + /// Start the EVM DNA server. + Start(Box), + /// Debug EVM RPC calls. + #[command(name = "dbg-rpc")] + DebugRpc { + #[clap(subcommand)] + command: DebugRpcCommand, + }, + /// Debug the index file. 
+ #[command(name = "dbg-index")] + DebugIndex { + #[clap(subcommand)] + command: DebugIndexCommand, + }, +} + +impl Cli { + pub async fn run(self, ct: CancellationToken) -> Result<(), EvmError> { + match self.command { + Command::Start(command) => command.run(ct).await, + Command::DebugRpc { command } => command.run().await, + Command::DebugIndex { command } => command.run().await.change_context(EvmError), + } + } +} diff --git a/evm/src/cli/rpc.rs b/evm/src/cli/rpc.rs new file mode 100644 index 00000000..af0ff60b --- /dev/null +++ b/evm/src/cli/rpc.rs @@ -0,0 +1,78 @@ +use std::time::Duration; + +use clap::Args; +use error_stack::{Result, ResultExt}; +use reqwest::header::{HeaderMap, HeaderName, HeaderValue}; +use url::Url; + +use crate::{ + error::EvmError, + provider::{JsonRpcProvider, JsonRpcProviderOptions}, +}; + +#[derive(Args, Debug)] +pub struct RpcArgs { + /// Evm RPC URL. + #[arg( + long = "rpc.url", + env = "EVM_RPC_URL", + default_value = "http://localhost:9545" + )] + pub rpc_url: String, + + /// Request timeout. + #[arg( + long = "rpc.timeout-sec", + env = "EVM_RPC_TIMEOUT_SEC", + default_value = "20" + )] + pub rpc_timeout_sec: u64, + + /// Headers to send with the requests. 
+ #[arg(long = "rpc.headers", env = "EVM_RPC_HEADERS")] + pub rpc_headers: Vec, +} + +impl RpcArgs { + pub fn to_json_rpc_provider(&self) -> Result { + let url = self + .rpc_url + .parse::() + .change_context(EvmError) + .attach_printable("failed to parse RPC URL") + .attach_printable_lazy(|| format!("url: {}", self.rpc_url))?; + + let headers = { + let mut headers = HeaderMap::default(); + + for kv in self.rpc_headers.iter() { + let (key, value) = kv + .split_once(':') + .ok_or(EvmError) + .attach_printable("invalid header") + .attach_printable_lazy(|| format!("header: {}", kv))?; + + headers.insert( + key.parse::() + .change_context(EvmError) + .attach_printable("invalid header name") + .attach_printable_lazy(|| format!("header name: {}", key))?, + value + .parse::() + .change_context(EvmError) + .attach_printable("invalid header value") + .attach_printable_lazy(|| format!("header value: {}", value))?, + ); + } + + headers + }; + + let options = JsonRpcProviderOptions { + timeout: Duration::from_secs(self.rpc_timeout_sec), + headers, + }; + + JsonRpcProvider::new(url, options).change_context(EvmError) + } +} diff --git a/evm/src/cli/start.rs b/evm/src/cli/start.rs new file mode 100644 index 00000000..449cdb5d --- /dev/null +++ b/evm/src/cli/start.rs @@ -0,0 +1,29 @@ +use apibara_dna_common::{run_server, StartArgs}; +use clap::Args; +use error_stack::{Result, ResultExt}; +use tokio_util::sync::CancellationToken; +use tracing::info; + +use crate::{error::EvmError, EvmChainSupport}; + +use super::rpc::RpcArgs; + +#[derive(Args, Debug)] +pub struct StartCommand { + #[clap(flatten)] + rpc: RpcArgs, + #[clap(flatten)] + start: StartArgs, +} + +impl StartCommand { + pub async fn run(self, ct: CancellationToken) -> Result<(), EvmError> { + info!("Starting EVM DNA server"); + let provider = self.rpc.to_json_rpc_provider()?; + let evm_chain = EvmChainSupport::new(provider); + + run_server(evm_chain, self.start, ct) + .await + .change_context(EvmError) + } +} diff --git 
a/evm/src/error.rs b/evm/src/error.rs new file mode 100644 index 00000000..26fbfd6d --- /dev/null +++ b/evm/src/error.rs @@ -0,0 +1,10 @@ +#[derive(Debug)] +pub struct EvmError; + +impl error_stack::Context for EvmError {} + +impl std::fmt::Display for EvmError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "EVM DNA error") + } +} diff --git a/evm/src/filter/helpers.rs b/evm/src/filter/helpers.rs new file mode 100644 index 00000000..1a264c03 --- /dev/null +++ b/evm/src/filter/helpers.rs @@ -0,0 +1,9 @@ +use apibara_dna_common::query::{BlockFilter, Filter}; + +pub trait BlockFilterExt { + fn compile_to_block_filter(&self) -> tonic::Result; +} + +pub trait FragmentFilterExt { + fn compile_to_filter(&self) -> tonic::Result; +} diff --git a/evm/src/filter/log.rs b/evm/src/filter/log.rs new file mode 100644 index 00000000..12d58c2b --- /dev/null +++ b/evm/src/filter/log.rs @@ -0,0 +1,109 @@ +use apibara_dna_common::{ + index::ScalarValue, + query::{Condition, Filter}, +}; +use apibara_dna_protocol::evm; + +use crate::fragment::{ + INDEX_LOG_BY_ADDRESS, INDEX_LOG_BY_TOPIC0, INDEX_LOG_BY_TOPIC1, INDEX_LOG_BY_TOPIC2, + INDEX_LOG_BY_TOPIC3, INDEX_LOG_BY_TOPIC_LENGTH, INDEX_LOG_BY_TRANSACTION_STATUS, + LOG_FRAGMENT_ID, RECEIPT_FRAGMENT_ID, TRANSACTION_FRAGMENT_ID, +}; + +use super::helpers::FragmentFilterExt; + +impl FragmentFilterExt for evm::LogFilter { + fn compile_to_filter(&self) -> tonic::Result { + let mut conditions = Vec::new(); + + if let Some(address) = self.address { + conditions.push(Condition { + index_id: INDEX_LOG_BY_ADDRESS, + key: ScalarValue::B160(address.to_bytes()), + }); + } + + if let Some(true) = self.strict { + conditions.push(Condition { + index_id: INDEX_LOG_BY_TOPIC_LENGTH, + key: ScalarValue::Uint32(self.topics.len() as u32), + }); + } + + let mut topics = self.topics.iter(); + + if let Some(topic) = topics.next().and_then(|t| t.value.as_ref()) { + conditions.push(Condition { + index_id: INDEX_LOG_BY_TOPIC0, 
+ key: ScalarValue::B256(topic.to_bytes()), + }); + } + if let Some(topic) = topics.next().and_then(|t| t.value.as_ref()) { + conditions.push(Condition { + index_id: INDEX_LOG_BY_TOPIC1, + key: ScalarValue::B256(topic.to_bytes()), + }); + } + if let Some(topic) = topics.next().and_then(|t| t.value.as_ref()) { + conditions.push(Condition { + index_id: INDEX_LOG_BY_TOPIC2, + key: ScalarValue::B256(topic.to_bytes()), + }); + } + if let Some(topic) = topics.next().and_then(|t| t.value.as_ref()) { + conditions.push(Condition { + index_id: INDEX_LOG_BY_TOPIC3, + key: ScalarValue::B256(topic.to_bytes()), + }); + } + + let transaction_status = if let Some(transaction_status) = self.transaction_status { + evm::TransactionStatusFilter::try_from(transaction_status).map_err(|_| { + tonic::Status::invalid_argument(format!( + "invalid transaction status in log filter with id {}", + self.id + )) + })? + } else { + evm::TransactionStatusFilter::Succeeded + }; + + match transaction_status { + evm::TransactionStatusFilter::Unspecified => {} + evm::TransactionStatusFilter::All => {} + evm::TransactionStatusFilter::Succeeded => { + conditions.push(Condition { + index_id: INDEX_LOG_BY_TRANSACTION_STATUS, + key: ScalarValue::Int32(evm::TransactionStatus::Succeeded as i32), + }); + } + evm::TransactionStatusFilter::Reverted => { + conditions.push(Condition { + index_id: INDEX_LOG_BY_TRANSACTION_STATUS, + key: ScalarValue::Int32(evm::TransactionStatus::Reverted as i32), + }); + } + }; + + let mut joins = Vec::new(); + + if let Some(true) = self.include_transaction { + joins.push(TRANSACTION_FRAGMENT_ID); + } + + if let Some(true) = self.include_receipt { + joins.push(RECEIPT_FRAGMENT_ID); + } + + if let Some(true) = self.include_siblings { + joins.push(LOG_FRAGMENT_ID); + } + + Ok(Filter { + filter_id: self.id, + fragment_id: LOG_FRAGMENT_ID, + conditions, + joins, + }) + } +} diff --git a/evm/src/filter/mod.rs b/evm/src/filter/mod.rs new file mode 100644 index 00000000..37cf346a --- 
/dev/null +++ b/evm/src/filter/mod.rs @@ -0,0 +1,72 @@ +mod helpers; +mod log; +mod transaction; +mod withdrawal; + +use apibara_dna_common::{data_stream::BlockFilterFactory, query::BlockFilter}; +use apibara_dna_protocol::evm; +use prost::Message; + +use self::helpers::{BlockFilterExt, FragmentFilterExt}; + +pub struct EvmFilterFactory; + +impl BlockFilterFactory for EvmFilterFactory { + fn create_block_filter( + &self, + filters: &[Vec], + ) -> tonic::Result, tonic::Status> { + let proto_filters = filters + .iter() + .map(|bytes| evm::Filter::decode(bytes.as_slice())) + .collect::, _>>() + .map_err(|_| tonic::Status::invalid_argument("failed to decode filter"))?; + + if proto_filters.is_empty() { + return Err(tonic::Status::invalid_argument("no filters provided")); + } + + if proto_filters.len() > 5 { + return Err(tonic::Status::invalid_argument(format!( + "too many filters ({} > 5)", + proto_filters.len(), + ))); + } + + proto_filters + .iter() + .map(BlockFilterExt::compile_to_block_filter) + .collect() + } +} + +impl BlockFilterExt for evm::Filter { + fn compile_to_block_filter(&self) -> tonic::Result { + let mut block_filter = BlockFilter::default(); + + if self.header.map(|h| h.always()).unwrap_or(false) { + block_filter.set_always_include_header(true); + } + + for filter in self.withdrawals.iter() { + let filter = filter.compile_to_filter()?; + block_filter.add_filter(filter); + } + + for filter in self.transactions.iter() { + let filter = filter.compile_to_filter()?; + block_filter.add_filter(filter); + } + + for filter in self.logs.iter() { + let filter = filter.compile_to_filter()?; + block_filter.add_filter(filter); + } + + if !block_filter.always_include_header && block_filter.is_empty() { + return Err(tonic::Status::invalid_argument("no filters provided")); + } + + Ok(block_filter) + } +} diff --git a/evm/src/filter/transaction.rs b/evm/src/filter/transaction.rs new file mode 100644 index 00000000..d80e54e2 --- /dev/null +++ 
b/evm/src/filter/transaction.rs @@ -0,0 +1,84 @@ +use apibara_dna_common::{ + index::ScalarValue, + query::{Condition, Filter}, +}; +use apibara_dna_protocol::evm; + +use crate::fragment::{ + INDEX_TRANSACTION_BY_CREATE, INDEX_TRANSACTION_BY_FROM_ADDRESS, INDEX_TRANSACTION_BY_STATUS, + INDEX_TRANSACTION_BY_TO_ADDRESS, LOG_FRAGMENT_ID, RECEIPT_FRAGMENT_ID, TRANSACTION_FRAGMENT_ID, +}; + +use super::helpers::FragmentFilterExt; + +impl FragmentFilterExt for evm::TransactionFilter { + fn compile_to_filter(&self) -> tonic::Result { + let mut conditions = Vec::new(); + + if let Some(from) = self.from { + conditions.push(Condition { + index_id: INDEX_TRANSACTION_BY_FROM_ADDRESS, + key: ScalarValue::B160(from.to_bytes()), + }); + } + + if let Some(to) = self.to { + conditions.push(Condition { + index_id: INDEX_TRANSACTION_BY_TO_ADDRESS, + key: ScalarValue::B160(to.to_bytes()), + }); + } + + if let Some(true) = self.create { + conditions.push(Condition { + index_id: INDEX_TRANSACTION_BY_CREATE, + key: ScalarValue::Bool(true), + }); + } + + let transaction_status = if let Some(transaction_status) = self.transaction_status { + evm::TransactionStatusFilter::try_from(transaction_status).map_err(|_| { + tonic::Status::invalid_argument(format!( + "invalid transaction status in transaction filter with id {}", + self.id + )) + })? 
+ } else { + evm::TransactionStatusFilter::Succeeded + }; + + match transaction_status { + evm::TransactionStatusFilter::Unspecified => {} + evm::TransactionStatusFilter::All => {} + evm::TransactionStatusFilter::Succeeded => { + conditions.push(Condition { + index_id: INDEX_TRANSACTION_BY_STATUS, + key: ScalarValue::Int32(evm::TransactionStatus::Succeeded as i32), + }); + } + evm::TransactionStatusFilter::Reverted => { + conditions.push(Condition { + index_id: INDEX_TRANSACTION_BY_STATUS, + key: ScalarValue::Int32(evm::TransactionStatus::Reverted as i32), + }); + } + }; + + let mut joins = Vec::new(); + + if let Some(true) = self.include_receipt { + joins.push(RECEIPT_FRAGMENT_ID); + } + + if let Some(true) = self.include_logs { + joins.push(LOG_FRAGMENT_ID); + } + + Ok(Filter { + filter_id: self.id, + fragment_id: TRANSACTION_FRAGMENT_ID, + conditions, + joins, + }) + } +} diff --git a/evm/src/filter/withdrawal.rs b/evm/src/filter/withdrawal.rs new file mode 100644 index 00000000..d835440b --- /dev/null +++ b/evm/src/filter/withdrawal.rs @@ -0,0 +1,38 @@ +use apibara_dna_common::{ + index::ScalarValue, + query::{Condition, Filter}, +}; +use apibara_dna_protocol::evm; + +use crate::fragment::{ + INDEX_WITHDRAWAL_BY_ADDRESS, INDEX_WITHDRAWAL_BY_VALIDATOR_INDEX, WITHDRAWAL_FRAGMENT_ID, +}; + +use super::helpers::FragmentFilterExt; + +impl FragmentFilterExt for evm::WithdrawalFilter { + fn compile_to_filter(&self) -> tonic::Result { + let mut conditions = Vec::new(); + + if let Some(validator_index) = self.validator_index { + conditions.push(Condition { + index_id: INDEX_WITHDRAWAL_BY_VALIDATOR_INDEX, + key: ScalarValue::Uint32(validator_index), + }); + } + + if let Some(address) = self.address { + conditions.push(Condition { + index_id: INDEX_WITHDRAWAL_BY_ADDRESS, + key: ScalarValue::B160(address.to_bytes()), + }); + } + + Ok(Filter { + filter_id: self.id, + fragment_id: WITHDRAWAL_FRAGMENT_ID, + conditions, + joins: Vec::default(), + }) + } +} diff --git 
a/evm/src/fragment.rs b/evm/src/fragment.rs new file mode 100644 index 00000000..575f1d9c --- /dev/null +++ b/evm/src/fragment.rs @@ -0,0 +1,33 @@ +//! Fragment constants. + +// Make sure the fragment IDs match the field tags in the protobuf Block message. + +pub const WITHDRAWAL_FRAGMENT_ID: u8 = 2; +pub const WITHDRAWAL_FRAGMENT_NAME: &str = "withdrawal"; + +pub const TRANSACTION_FRAGMENT_ID: u8 = 3; +pub const TRANSACTION_FRAGMENT_NAME: &str = "transaction"; + +pub const RECEIPT_FRAGMENT_ID: u8 = 4; +pub const RECEIPT_FRAGMENT_NAME: &str = "receipt"; + +pub const LOG_FRAGMENT_ID: u8 = 5; +pub const LOG_FRAGMENT_NAME: &str = "log"; + +pub const INDEX_WITHDRAWAL_BY_VALIDATOR_INDEX: u8 = 0; +pub const INDEX_WITHDRAWAL_BY_ADDRESS: u8 = 1; + +pub const INDEX_TRANSACTION_BY_FROM_ADDRESS: u8 = 0; +pub const INDEX_TRANSACTION_BY_TO_ADDRESS: u8 = 1; +pub const INDEX_TRANSACTION_BY_CREATE: u8 = 2; +pub const INDEX_TRANSACTION_BY_STATUS: u8 = 3; + +// No receipts index. + +pub const INDEX_LOG_BY_ADDRESS: u8 = 0; +pub const INDEX_LOG_BY_TOPIC0: u8 = 1; +pub const INDEX_LOG_BY_TOPIC1: u8 = 2; +pub const INDEX_LOG_BY_TOPIC2: u8 = 3; +pub const INDEX_LOG_BY_TOPIC3: u8 = 4; +pub const INDEX_LOG_BY_TOPIC_LENGTH: u8 = 5; +pub const INDEX_LOG_BY_TRANSACTION_STATUS: u8 = 6; diff --git a/evm/src/ingestion.rs b/evm/src/ingestion.rs new file mode 100644 index 00000000..30e619ac --- /dev/null +++ b/evm/src/ingestion.rs @@ -0,0 +1,603 @@ +use alloy_rpc_types::BlockId; +use apibara_dna_common::{ + chain::BlockInfo, + fragment::{ + Block, BodyFragment, HeaderFragment, Index, IndexFragment, IndexGroupFragment, Join, + JoinFragment, JoinGroupFragment, + }, + index::{BitmapIndexBuilder, ScalarValue}, + ingestion::{BlockIngestion, IngestionError}, + join::{JoinToManyIndexBuilder, JoinToOneIndexBuilder}, + Cursor, +}; +use apibara_dna_protocol::evm; +use error_stack::{Result, ResultExt}; +use prost::Message; + +use crate::{ + fragment::{ + INDEX_LOG_BY_ADDRESS, INDEX_LOG_BY_TOPIC0, 
INDEX_LOG_BY_TOPIC1, INDEX_LOG_BY_TOPIC2, + INDEX_LOG_BY_TOPIC3, INDEX_LOG_BY_TOPIC_LENGTH, INDEX_LOG_BY_TRANSACTION_STATUS, + INDEX_TRANSACTION_BY_CREATE, INDEX_TRANSACTION_BY_FROM_ADDRESS, + INDEX_TRANSACTION_BY_STATUS, INDEX_TRANSACTION_BY_TO_ADDRESS, INDEX_WITHDRAWAL_BY_ADDRESS, + INDEX_WITHDRAWAL_BY_VALIDATOR_INDEX, LOG_FRAGMENT_ID, LOG_FRAGMENT_NAME, + RECEIPT_FRAGMENT_ID, RECEIPT_FRAGMENT_NAME, TRANSACTION_FRAGMENT_ID, + TRANSACTION_FRAGMENT_NAME, WITHDRAWAL_FRAGMENT_ID, WITHDRAWAL_FRAGMENT_NAME, + }, + proto::{convert_block_header, ModelExt}, + provider::{models, BlockExt, JsonRpcProvider}, +}; + +#[derive(Clone)] +pub struct EvmBlockIngestion { + provider: JsonRpcProvider, +} + +impl EvmBlockIngestion { + pub fn new(provider: JsonRpcProvider) -> Self { + Self { provider } + } +} + +impl BlockIngestion for EvmBlockIngestion { + #[tracing::instrument("evm_get_head_cursor", skip_all, err(Debug))] + async fn get_head_cursor(&self) -> Result { + let block = self + .provider + .get_block_header(BlockId::latest()) + .await + .change_context(IngestionError::RpcRequest)?; + + Ok(block.cursor()) + } + + #[tracing::instrument("evm_get_finalized_cursor", skip_all, err(Debug))] + async fn get_finalized_cursor(&self) -> Result { + let block = self + .provider + .get_block_header(BlockId::finalized()) + .await + .change_context(IngestionError::RpcRequest)?; + + Ok(block.cursor()) + } + + #[tracing::instrument("evm_get_block_info_by_number", skip(self), err(Debug))] + async fn get_block_info_by_number( + &self, + block_number: u64, + ) -> Result { + let block = self + .provider + .get_block_header(BlockId::number(block_number)) + .await + .change_context(IngestionError::RpcRequest)?; + + Ok(block.block_info()) + } + + #[tracing::instrument("evm_ingest_block_by_number", skip(self), err(Debug))] + async fn ingest_block_by_number( + &self, + block_number: u64, + ) -> Result<(BlockInfo, Block), IngestionError> { + let mut block_with_transactions = self + .provider + 
.get_block_with_transactions(BlockId::number(block_number)) + .await + .change_context(IngestionError::RpcRequest) + .attach_printable("failed to get block with transactions") + .attach_printable_lazy(|| format!("block number: {}", block_number))?; + + let block_id = BlockId::hash(block_with_transactions.header.hash); + + let block_receipts = self + .provider + .get_block_receipts(block_id) + .await + .change_context(IngestionError::RpcRequest) + .attach_printable("failed to get block receipts") + .attach_printable_lazy(|| format!("block number: {}", block_number)) + .attach_printable_lazy(|| { + format!("block hash: {}", block_with_transactions.header.hash) + })?; + + let block_transactions = std::mem::take(&mut block_with_transactions.transactions); + let Some(block_transactions) = block_transactions.as_transactions() else { + return Err(IngestionError::RpcRequest) + .attach_printable("unexpected transactions as hashes"); + }; + + let block_withdrawals = + std::mem::take(&mut block_with_transactions.withdrawals).unwrap_or_default(); + + let block_info = block_with_transactions.block_info(); + + let header_fragment = { + let header = convert_block_header(block_with_transactions.header); + HeaderFragment { + data: header.encode_to_vec(), + } + }; + + let (body, index, join) = + collect_block_body_and_index(block_transactions, &block_withdrawals, &block_receipts)?; + + let block = Block { + header: header_fragment, + index, + body, + join, + }; + + Ok((block_info, block)) + } +} + +fn collect_block_body_and_index( + transactions: &[models::Transaction], + withdrawals: &[models::Withdrawal], + receipts: &[models::TransactionReceipt], +) -> Result<(Vec, IndexGroupFragment, JoinGroupFragment), IngestionError> { + let mut block_withdrawals = Vec::new(); + let mut block_transactions = Vec::new(); + let mut block_receipts = Vec::new(); + let mut block_logs = Vec::new(); + + let mut index_withdrawal_by_validator_index = BitmapIndexBuilder::default(); + let mut 
index_withdrawal_by_address = BitmapIndexBuilder::default(); + + let mut index_transaction_by_from_address = BitmapIndexBuilder::default(); + let mut index_transaction_by_to_address = BitmapIndexBuilder::default(); + let mut index_transaction_by_create = BitmapIndexBuilder::default(); + let mut index_transaction_by_status = BitmapIndexBuilder::default(); + let mut join_transaction_to_receipt = JoinToOneIndexBuilder::default(); + let mut join_transaction_to_logs = JoinToManyIndexBuilder::default(); + + let mut index_log_by_address = BitmapIndexBuilder::default(); + let mut index_log_by_topic0 = BitmapIndexBuilder::default(); + let mut index_log_by_topic1 = BitmapIndexBuilder::default(); + let mut index_log_by_topic2 = BitmapIndexBuilder::default(); + let mut index_log_by_topic3 = BitmapIndexBuilder::default(); + let mut index_log_by_topic_length = BitmapIndexBuilder::default(); + let mut index_log_by_transaction_status = BitmapIndexBuilder::default(); + let mut join_log_to_transaction = JoinToOneIndexBuilder::default(); + let mut join_log_to_receipt = JoinToOneIndexBuilder::default(); + let mut join_log_to_siblings = JoinToManyIndexBuilder::default(); + + for (withdrawal_index, withdrawal) in withdrawals.iter().enumerate() { + let withdrawal_index = withdrawal_index as u32; + + let mut withdrawal = withdrawal.to_proto(); + + withdrawal.withdrawal_index = withdrawal_index; + + index_withdrawal_by_validator_index.insert( + ScalarValue::Uint32(withdrawal.validator_index), + withdrawal_index, + ); + + if let Some(address) = withdrawal.address { + index_withdrawal_by_address + .insert(ScalarValue::B160(address.to_bytes()), withdrawal_index); + } + + block_withdrawals.push(withdrawal); + } + + for (transaction_index, (transaction, receipt)) in + transactions.iter().zip(receipts.iter()).enumerate() + { + let transaction_index = transaction_index as u32; + let transaction_hash = receipt.transaction_hash.to_proto(); + + let transaction_status = if receipt.status() { + 
evm::TransactionStatus::Succeeded as i32 + } else { + evm::TransactionStatus::Reverted as i32 + }; + + if let Some(rpc_transaction_index) = transaction.transaction_index { + if rpc_transaction_index != transaction_index as u64 { + return Err(IngestionError::Model) + .attach_printable("transaction index mismatch") + .attach_printable_lazy(|| format!("transaction index: {}", transaction_index)) + .attach_printable_lazy(|| { + format!("rpc transaction index: {}", rpc_transaction_index) + }); + } + } + + if let Some(rpc_transaction_index) = receipt.transaction_index { + if rpc_transaction_index != transaction_index as u64 { + return Err(IngestionError::Model) + .attach_printable("transaction index mismatch in receipt") + .attach_printable_lazy(|| format!("transaction index: {}", transaction_index)) + .attach_printable_lazy(|| { + format!("rpc transaction index: {}", rpc_transaction_index) + }); + } + } + + let mut transaction = transaction.to_proto(); + + transaction.transaction_index = transaction_index; + transaction.transaction_hash = transaction_hash.into(); + transaction.transaction_status = transaction_status; + + join_transaction_to_receipt.insert(transaction_index, transaction_index); + + if let Some(from) = transaction.from { + index_transaction_by_from_address + .insert(ScalarValue::B160(from.to_bytes()), transaction_index); + } + + match transaction.to { + Some(to) => { + index_transaction_by_to_address + .insert(ScalarValue::B160(to.to_bytes()), transaction_index); + index_transaction_by_create.insert(ScalarValue::Bool(false), transaction_index); + } + None => { + index_transaction_by_create.insert(ScalarValue::Bool(true), transaction_index); + } + } + + index_transaction_by_status + .insert(ScalarValue::Int32(transaction_status), transaction_index); + + block_transactions.push(transaction); + + let mut transaction_logs_id = Vec::new(); + + for log in receipt.inner.logs() { + let log_index = block_logs.len() as u32; + + if let Some(rpc_log_index) = 
log.log_index { + if rpc_log_index != log_index as u64 { + return Err(IngestionError::Model) + .attach_printable("log index mismatch in receipt") + .attach_printable_lazy(|| format!("expected log index: {}", log_index)) + .attach_printable_lazy(|| format!("rpc log index: {}", rpc_log_index)); + } + } + + if let Some(rpc_transaction_index) = log.transaction_index { + if rpc_transaction_index != transaction_index as u64 { + return Err(IngestionError::Model) + .attach_printable("transaction index mismatch in log") + .attach_printable_lazy(|| { + format!("transaction index: {}", transaction_index) + }) + .attach_printable_lazy(|| { + format!("rpc transaction index: {}", rpc_transaction_index) + }); + } + } + + let mut log = log.to_proto(); + + log.log_index = log_index; + log.transaction_index = transaction_index; + log.transaction_hash = transaction_hash.into(); + log.transaction_status = transaction_status; + + join_log_to_transaction.insert(log_index, transaction_index); + join_log_to_receipt.insert(log_index, transaction_index); + join_transaction_to_logs.insert(transaction_index, log_index); + + transaction_logs_id.push(log_index); + + if let Some(address) = log.address { + index_log_by_address.insert(ScalarValue::B160(address.to_bytes()), log_index); + } + + let mut topics = log.topics.iter(); + + if let Some(topic) = topics.next() { + index_log_by_topic0.insert(ScalarValue::B256(topic.to_bytes()), log_index); + } + if let Some(topic) = topics.next() { + index_log_by_topic1.insert(ScalarValue::B256(topic.to_bytes()), log_index); + } + if let Some(topic) = topics.next() { + index_log_by_topic2.insert(ScalarValue::B256(topic.to_bytes()), log_index); + } + if let Some(topic) = topics.next() { + index_log_by_topic3.insert(ScalarValue::B256(topic.to_bytes()), log_index); + } + + index_log_by_topic_length + .insert(ScalarValue::Uint32(log.topics.len() as u32), log_index); + + index_log_by_transaction_status + .insert(ScalarValue::Int32(transaction_status), log_index); 
+ + block_logs.push(log); + } + + for log_id in transaction_logs_id.iter() { + for sibling_id in transaction_logs_id.iter() { + if sibling_id != log_id { + join_log_to_siblings.insert(*log_id, *sibling_id); + } + } + } + + let mut receipt = receipt.to_proto(); + + receipt.transaction_index = transaction_index; + receipt.transaction_hash = transaction_hash.into(); + receipt.transaction_status = transaction_status; + + block_receipts.push(receipt); + } + + let withdrawal_index = { + let index_withdrawal_by_validator_index = Index { + index_id: INDEX_WITHDRAWAL_BY_VALIDATOR_INDEX, + index: index_withdrawal_by_validator_index + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_withdrawal_by_address = Index { + index_id: INDEX_WITHDRAWAL_BY_ADDRESS, + index: index_withdrawal_by_address + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + IndexFragment { + fragment_id: WITHDRAWAL_FRAGMENT_ID, + range_start: 0, + range_len: block_withdrawals.len() as u32, + indexes: vec![ + index_withdrawal_by_validator_index, + index_withdrawal_by_address, + ], + } + }; + + let withdrawal_join = JoinFragment { + fragment_id: WITHDRAWAL_FRAGMENT_ID, + joins: Vec::default(), + }; + + let withdrawal_fragment = BodyFragment { + fragment_id: WITHDRAWAL_FRAGMENT_ID, + name: WITHDRAWAL_FRAGMENT_NAME.to_string(), + data: block_withdrawals + .iter() + .map(Message::encode_to_vec) + .collect(), + }; + + let transaction_index = { + let index_transaction_by_from_address = Index { + index_id: INDEX_TRANSACTION_BY_FROM_ADDRESS, + index: index_transaction_by_from_address + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_transaction_by_to_address = Index { + index_id: INDEX_TRANSACTION_BY_TO_ADDRESS, + index: index_transaction_by_to_address + .build() + .change_context(IngestionError::Indexing)? 
+ .into(), + }; + + let index_transaction_by_create = Index { + index_id: INDEX_TRANSACTION_BY_CREATE, + index: index_transaction_by_create + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_transaction_by_status = Index { + index_id: INDEX_TRANSACTION_BY_STATUS, + index: index_transaction_by_status + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + IndexFragment { + fragment_id: TRANSACTION_FRAGMENT_ID, + range_start: 0, + range_len: block_transactions.len() as u32, + indexes: vec![ + index_transaction_by_from_address, + index_transaction_by_to_address, + index_transaction_by_create, + index_transaction_by_status, + ], + } + }; + + let transaction_join = { + let join_transaction_to_receipt = Join { + to_fragment_id: RECEIPT_FRAGMENT_ID, + index: join_transaction_to_receipt.build().into(), + }; + + let join_transaction_to_logs = Join { + to_fragment_id: LOG_FRAGMENT_ID, + index: join_transaction_to_logs + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + JoinFragment { + fragment_id: TRANSACTION_FRAGMENT_ID, + joins: vec![join_transaction_to_receipt, join_transaction_to_logs], + } + }; + + let transaction_fragment = BodyFragment { + fragment_id: TRANSACTION_FRAGMENT_ID, + name: TRANSACTION_FRAGMENT_NAME.to_string(), + data: block_transactions + .iter() + .map(Message::encode_to_vec) + .collect(), + }; + + // Empty since no receipt filter. 
+ let receipt_index = IndexFragment { + fragment_id: RECEIPT_FRAGMENT_ID, + range_start: 0, + range_len: block_receipts.len() as u32, + indexes: Vec::default(), + }; + + let receipt_join = JoinFragment { + fragment_id: RECEIPT_FRAGMENT_ID, + joins: Vec::default(), + }; + + let receipt_fragment = BodyFragment { + fragment_id: RECEIPT_FRAGMENT_ID, + name: RECEIPT_FRAGMENT_NAME.to_string(), + data: block_receipts.iter().map(Message::encode_to_vec).collect(), + }; + + let log_index = { + let index_log_by_address = Index { + index_id: INDEX_LOG_BY_ADDRESS, + index: index_log_by_address + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_log_by_topic0 = Index { + index_id: INDEX_LOG_BY_TOPIC0, + index: index_log_by_topic0 + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_log_by_topic1 = Index { + index_id: INDEX_LOG_BY_TOPIC1, + index: index_log_by_topic1 + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_log_by_topic2 = Index { + index_id: INDEX_LOG_BY_TOPIC2, + index: index_log_by_topic2 + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_log_by_topic3 = Index { + index_id: INDEX_LOG_BY_TOPIC3, + index: index_log_by_topic3 + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_log_by_topic_length = Index { + index_id: INDEX_LOG_BY_TOPIC_LENGTH, + index: index_log_by_topic_length + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + let index_log_by_transaction_status = Index { + index_id: INDEX_LOG_BY_TRANSACTION_STATUS, + index: index_log_by_transaction_status + .build() + .change_context(IngestionError::Indexing)? 
+ .into(), + }; + + IndexFragment { + fragment_id: LOG_FRAGMENT_ID, + range_start: 0, + range_len: block_logs.len() as u32, + indexes: vec![ + index_log_by_address, + index_log_by_topic0, + index_log_by_topic1, + index_log_by_topic2, + index_log_by_topic3, + index_log_by_topic_length, + index_log_by_transaction_status, + ], + } + }; + + let log_join = { + let join_log_to_transaction = Join { + to_fragment_id: TRANSACTION_FRAGMENT_ID, + index: join_log_to_transaction.build().into(), + }; + + let join_log_to_receipt = Join { + to_fragment_id: RECEIPT_FRAGMENT_ID, + index: join_log_to_receipt.build().into(), + }; + + let join_log_to_siblings = Join { + to_fragment_id: LOG_FRAGMENT_ID, + index: join_log_to_siblings + .build() + .change_context(IngestionError::Indexing)? + .into(), + }; + + JoinFragment { + fragment_id: LOG_FRAGMENT_ID, + joins: vec![ + join_log_to_transaction, + join_log_to_receipt, + join_log_to_siblings, + ], + } + }; + + let log_fragment = BodyFragment { + fragment_id: LOG_FRAGMENT_ID, + name: LOG_FRAGMENT_NAME.to_string(), + data: block_logs.iter().map(Message::encode_to_vec).collect(), + }; + + let index_group = IndexGroupFragment { + indexes: vec![ + withdrawal_index, + transaction_index, + receipt_index, + log_index, + ], + }; + + let join_group = JoinGroupFragment { + joins: vec![withdrawal_join, transaction_join, receipt_join, log_join], + }; + + Ok(( + vec![ + withdrawal_fragment, + transaction_fragment, + receipt_fragment, + log_fragment, + ], + index_group, + join_group, + )) +} diff --git a/evm/src/lib.rs b/evm/src/lib.rs new file mode 100644 index 00000000..1fd0e2e9 --- /dev/null +++ b/evm/src/lib.rs @@ -0,0 +1,64 @@ +pub mod cli; +pub mod error; +pub mod filter; +pub mod fragment; +pub mod ingestion; +pub mod proto; +pub mod provider; + +use apibara_dna_common::{fragment::FragmentInfo, ChainSupport}; + +use crate::{ + filter::EvmFilterFactory, + fragment::{ + LOG_FRAGMENT_ID, LOG_FRAGMENT_NAME, RECEIPT_FRAGMENT_ID, RECEIPT_FRAGMENT_NAME, 
+ TRANSACTION_FRAGMENT_ID, TRANSACTION_FRAGMENT_NAME, WITHDRAWAL_FRAGMENT_ID, + WITHDRAWAL_FRAGMENT_NAME, + }, + ingestion::EvmBlockIngestion, + provider::JsonRpcProvider, +}; + +pub struct EvmChainSupport { + provider: JsonRpcProvider, +} + +impl EvmChainSupport { + pub fn new(provider: JsonRpcProvider) -> Self { + Self { provider } + } +} + +impl ChainSupport for EvmChainSupport { + type BlockIngestion = EvmBlockIngestion; + type BlockFilterFactory = EvmFilterFactory; + + fn fragment_info(&self) -> Vec { + vec![ + FragmentInfo { + fragment_id: WITHDRAWAL_FRAGMENT_ID, + name: WITHDRAWAL_FRAGMENT_NAME.to_string(), + }, + FragmentInfo { + fragment_id: TRANSACTION_FRAGMENT_ID, + name: TRANSACTION_FRAGMENT_NAME.to_string(), + }, + FragmentInfo { + fragment_id: RECEIPT_FRAGMENT_ID, + name: RECEIPT_FRAGMENT_NAME.to_string(), + }, + FragmentInfo { + fragment_id: LOG_FRAGMENT_ID, + name: LOG_FRAGMENT_NAME.to_string(), + }, + ] + } + + fn block_filter_factory(&self) -> Self::BlockFilterFactory { + EvmFilterFactory + } + + fn block_ingestion(&self) -> Self::BlockIngestion { + EvmBlockIngestion::new(self.provider.clone()) + } +} diff --git a/evm/src/proto.rs b/evm/src/proto.rs new file mode 100644 index 00000000..33f21242 --- /dev/null +++ b/evm/src/proto.rs @@ -0,0 +1,207 @@ +use apibara_dna_protocol::evm; + +use crate::provider::models; + +pub trait ModelExt { + type Proto; + fn to_proto(&self) -> Self::Proto; +} + +pub fn convert_block_header(block: models::Header) -> evm::BlockHeader { + let timestamp = prost_types::Timestamp { + seconds: block.timestamp as i64, + nanos: 0, + }; + + evm::BlockHeader { + block_number: block.number, + block_hash: block.hash.to_proto().into(), + parent_block_hash: block.parent_hash.to_proto().into(), + uncles_hash: block.uncles_hash.to_proto().into(), + miner: block.miner.to_proto().into(), + state_root: block.state_root.to_proto().into(), + transactions_root: block.transactions_root.to_proto().into(), + receipts_root: 
block.receipts_root.to_proto().into(), + logs_bloom: block.logs_bloom.to_proto().into(), + difficulty: block.difficulty.to_proto().into(), + gas_limit: block.gas_limit.to_proto().into(), + gas_used: block.gas_used.to_proto().into(), + timestamp: timestamp.into(), + extra_data: block.extra_data.to_vec(), + mix_hash: block.mix_hash.as_ref().map(ModelExt::to_proto), + nonce: block.nonce.map(|n| u64::from_be_bytes(n.0)), + base_fee_per_gas: block.base_fee_per_gas.as_ref().map(ModelExt::to_proto), + withdrawals_root: block.withdrawals_root.as_ref().map(ModelExt::to_proto), + total_difficulty: block.total_difficulty.as_ref().map(ModelExt::to_proto), + blob_gas_used: block.blob_gas_used.as_ref().map(ModelExt::to_proto), + excess_blob_gas: block.excess_blob_gas.as_ref().map(ModelExt::to_proto), + parent_beacon_block_root: block + .parent_beacon_block_root + .as_ref() + .map(ModelExt::to_proto), + } +} + +impl ModelExt for models::Transaction { + type Proto = evm::Transaction; + + fn to_proto(&self) -> Self::Proto { + evm::Transaction { + filter_ids: Vec::new(), + transaction_index: u32::MAX, + transaction_hash: None, + nonce: self.nonce, + from: self.from.to_proto().into(), + to: self.to.as_ref().map(ModelExt::to_proto), + value: self.value.to_proto().into(), + gas_price: self.gas_price.as_ref().map(ModelExt::to_proto), + gas: self.gas.to_proto().into(), + max_fee_per_gas: self.max_fee_per_gas.as_ref().map(ModelExt::to_proto), + max_priority_fee_per_gas: self + .max_priority_fee_per_gas + .as_ref() + .map(ModelExt::to_proto), + input: self.input.to_vec(), + signature: self.signature.as_ref().map(ModelExt::to_proto), + chain_id: self.chain_id, + access_list: self + .access_list + .as_ref() + .map(|l| l.iter().map(ModelExt::to_proto).collect()) + .unwrap_or_default(), + transaction_type: self.transaction_type.unwrap_or_default() as u64, + max_fee_per_blob_gas: self.max_fee_per_blob_gas.as_ref().map(ModelExt::to_proto), + blob_versioned_hashes: self + .blob_versioned_hashes + 
.as_ref() + .map(|l| l.iter().map(ModelExt::to_proto).collect()) + .unwrap_or_default(), + transaction_status: 0, + } + } +} + +impl ModelExt for models::Withdrawal { + type Proto = evm::Withdrawal; + + fn to_proto(&self) -> Self::Proto { + evm::Withdrawal { + filter_ids: Vec::new(), + withdrawal_index: u32::MAX, + index: self.index, + validator_index: self.validator_index as u32, + address: self.address.to_proto().into(), + amount: self.amount, + } + } +} + +impl ModelExt for models::TransactionReceipt { + type Proto = evm::TransactionReceipt; + + fn to_proto(&self) -> Self::Proto { + evm::TransactionReceipt { + filter_ids: Vec::new(), + transaction_index: u32::MAX, + transaction_hash: self.transaction_hash.to_proto().into(), + cumulative_gas_used: self.inner.cumulative_gas_used().to_proto().into(), + gas_used: self.gas_used.to_proto().into(), + effective_gas_price: self.effective_gas_price.to_proto().into(), + from: self.from.to_proto().into(), + to: self.to.as_ref().map(ModelExt::to_proto), + contract_address: self.contract_address.as_ref().map(ModelExt::to_proto), + logs_bloom: self.inner.logs_bloom().to_proto().into(), + transaction_type: self.transaction_type() as u8 as u64, + blob_gas_used: self.blob_gas_used.as_ref().map(ModelExt::to_proto), + blob_gas_price: self.blob_gas_price.as_ref().map(ModelExt::to_proto), + transaction_status: if self.status() { + evm::TransactionStatus::Succeeded as i32 + } else { + evm::TransactionStatus::Reverted as i32 + }, + } + } +} + +impl ModelExt for models::Log { + type Proto = evm::Log; + + fn to_proto(&self) -> Self::Proto { + evm::Log { + filter_ids: Vec::new(), + log_index: u32::MAX, + address: self.address().to_proto().into(), + topics: self.topics().iter().map(ModelExt::to_proto).collect(), + data: self.inner.data.data.to_vec(), + transaction_index: u32::MAX, + transaction_hash: None, + transaction_status: 0, + } + } +} + +impl ModelExt for models::Signature { + type Proto = evm::Signature; + + fn to_proto(&self) -> 
Self::Proto { + evm::Signature { + r: self.r.to_proto().into(), + s: self.s.to_proto().into(), + v: self.v.to_proto().into(), + y_parity: self.y_parity.map(|p| p.0), + } + } +} + +impl ModelExt for models::AccessListItem { + type Proto = evm::AccessListItem; + + fn to_proto(&self) -> Self::Proto { + evm::AccessListItem { + address: self.address.to_proto().into(), + storage_keys: self.storage_keys.iter().map(ModelExt::to_proto).collect(), + } + } +} + +impl ModelExt for models::B256 { + type Proto = evm::B256; + + fn to_proto(&self) -> Self::Proto { + evm::B256::from_bytes(&self.0) + } +} + +impl ModelExt for models::U256 { + type Proto = evm::U256; + + fn to_proto(&self) -> Self::Proto { + evm::U256::from_bytes(&self.to_be_bytes()) + } +} + +impl ModelExt for models::Address { + type Proto = evm::Address; + + fn to_proto(&self) -> Self::Proto { + evm::Address::from_bytes(&self.0) + } +} + +impl ModelExt for models::Bloom { + type Proto = evm::Bloom; + + fn to_proto(&self) -> Self::Proto { + evm::Bloom { + value: self.0.to_vec(), + } + } +} + +impl ModelExt for u128 { + type Proto = evm::U128; + + fn to_proto(&self) -> Self::Proto { + evm::U128::from_bytes(&self.to_be_bytes()) + } +} diff --git a/evm/src/provider/http.rs b/evm/src/provider/http.rs new file mode 100644 index 00000000..8337014b --- /dev/null +++ b/evm/src/provider/http.rs @@ -0,0 +1,137 @@ +use std::{sync::Arc, time::Duration}; + +use alloy_primitives::BlockHash; +use alloy_provider::{network::Ethereum, Provider, ProviderBuilder}; +use alloy_rpc_client::ClientBuilder; +use alloy_transport::BoxTransport; +use error_stack::{Result, ResultExt}; +use reqwest::header::{HeaderMap, HeaderValue}; +use url::Url; + +pub use alloy_rpc_types::BlockId; + +use super::models; + +#[derive(Debug)] +pub enum JsonRpcProviderError { + Request, + Timeout, + NotFound, + Configuration, +} + +#[derive(Debug, Clone)] +pub struct JsonRpcProviderOptions { + /// Request timeout. + pub timeout: Duration, + /// Request headers. 
+ pub headers: HeaderMap, +} + +#[derive(Clone)] +pub struct JsonRpcProvider { + provider: Arc>, + options: JsonRpcProviderOptions, +} + +impl JsonRpcProvider { + pub fn new(url: Url, options: JsonRpcProviderOptions) -> Result { + if !options.headers.is_empty() { + return Err(JsonRpcProviderError::Configuration) + .attach_printable("custom headers are not supported"); + } + + let client = ClientBuilder::default().http(url); + let provider = ProviderBuilder::default().on_client(client).boxed(); + + Ok(Self { + provider: Arc::new(provider), + options, + }) + } + + pub async fn get_block_header( + &self, + block_id: BlockId, + ) -> Result { + let request = match block_id { + BlockId::Number(number) => self + .provider + .client() + .request::<_, Option>( + "eth_getBlockByNumber", + (number, false), + ) + .boxed(), + BlockId::Hash(hash) => { + let hash = BlockHash::from(hash); + self.provider + .client() + .request::<_, Option>( + "eth_getBlockByHash", + (hash, false), + ) + .boxed() + } + }; + + let Ok(response) = tokio::time::timeout(self.options.timeout, request).await else { + return Err(JsonRpcProviderError::Timeout) + .attach_printable("failed to get block header") + .attach_printable_lazy(|| format!("block id: {block_id:?}")); + }; + + response + .change_context(JsonRpcProviderError::Request)? + .ok_or(JsonRpcProviderError::NotFound.into()) + } + + pub async fn get_block_with_transactions( + &self, + block_id: BlockId, + ) -> Result { + let request = self + .provider + .get_block(block_id, alloy_rpc_types::BlockTransactionsKind::Full); + + let Ok(response) = tokio::time::timeout(self.options.timeout, request).await else { + return Err(JsonRpcProviderError::Timeout) + .attach_printable("failed to get block with transactions") + .attach_printable_lazy(|| format!("block id: {block_id:?}")); + }; + + response + .change_context(JsonRpcProviderError::Request)? 
+ .ok_or(JsonRpcProviderError::NotFound.into()) + } + + pub async fn get_block_receipts( + &self, + block_id: BlockId, + ) -> Result, JsonRpcProviderError> { + let request = self.provider.get_block_receipts(block_id); + + let Ok(response) = tokio::time::timeout(self.options.timeout, request).await else { + return Err(JsonRpcProviderError::Timeout) + .attach_printable("failed to get block with receipts") + .attach_printable_lazy(|| format!("block id: {block_id:?}")); + }; + + response + .change_context(JsonRpcProviderError::Request)? + .ok_or(JsonRpcProviderError::NotFound.into()) + } +} + +impl error_stack::Context for JsonRpcProviderError {} + +impl std::fmt::Display for JsonRpcProviderError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + JsonRpcProviderError::Request => write!(f, "failed to send request"), + JsonRpcProviderError::Timeout => write!(f, "request timed out"), + JsonRpcProviderError::NotFound => write!(f, "not found"), + JsonRpcProviderError::Configuration => write!(f, "configuration error"), + } + } +} diff --git a/evm/src/provider/mod.rs b/evm/src/provider/mod.rs new file mode 100644 index 00000000..6f965550 --- /dev/null +++ b/evm/src/provider/mod.rs @@ -0,0 +1,5 @@ +mod http; +pub mod models; + +pub use self::http::{BlockId, JsonRpcProvider, JsonRpcProviderError, JsonRpcProviderOptions}; +pub use self::models::BlockExt; diff --git a/evm/src/provider/models.rs b/evm/src/provider/models.rs new file mode 100644 index 00000000..b591e703 --- /dev/null +++ b/evm/src/provider/models.rs @@ -0,0 +1,32 @@ +pub use alloy_primitives::{Address, Bloom, B256, U128, U256}; +pub use alloy_rpc_types::{ + AccessListItem, Block, Header, Log, Signature, Transaction, TransactionReceipt, Withdrawal, +}; +use apibara_dna_common::{chain::BlockInfo, Cursor, Hash}; + +pub type BlockWithTxHashes = Block; + +pub trait BlockExt { + fn cursor(&self) -> Cursor; + fn block_info(&self) -> BlockInfo; +} + +impl BlockExt for Block { + fn 
cursor(&self) -> Cursor { + let number = self.header.number; + let hash = self.header.hash; + Cursor::new(number, Hash(hash.to_vec())) + } + + fn block_info(&self) -> BlockInfo { + let number = self.header.number; + let hash = self.header.hash; + let parent = self.header.parent_hash; + + BlockInfo { + number, + hash: Hash(hash.to_vec()), + parent: Hash(parent.to_vec()), + } + } +} diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index 7f135c4e..00000000 --- a/examples/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Apibara Example Scripts - -This folder contains example scripts used to index onchain data with Apibara. - -Notice that the scripts use well-known libraries such as viem and starknet.js, -this means it's possible to share code between frontend and indexer. - -**Running** - -Running the examples requires you to install the Apibara CLI tools (TODO: link -to instructions) and to create a free Apibara API key. - -```bash -apibara run /path/to/script.js -``` - -## Folder Structure - -- `common/`: this folder contains the scripts to transform data, shared between - all examples. -- `webhook/`: show how to use the webhook integration. -- `postgres/`: show how to use the PostgreSQL integration. -- `mongo/`: show how to use the MongoDB integration. -- `parquet/`: show how to use the Parquet integration. - -## Networks - -### Starknet - -These examples stream and decode ETH `Transfer` events. For each transfer, we -extract the following data: - -- block number and hash -- timestamp -- transaction hash -- transfer id: the transaction hash + event index -- the address sending ETH -- the address receiving ETH -- the amount, both raw and formatted diff --git a/examples/common/starknet.js b/examples/common/starknet.js deleted file mode 100644 index e3b8bdd5..00000000 --- a/examples/common/starknet.js +++ /dev/null @@ -1,56 +0,0 @@ -/* Starknet ETH indexer - * - * This file contains a filter and transform to index Starknet ETH transactions. 
- */ - -// You can import any library supported by Deno. -import { hash, uint256 } from "https://esm.run/starknet@5.14"; -import { formatUnits } from "https://esm.run/viem@1.4"; - -const DECIMALS = 18; -// Can read from environment variables if you want to. -// In that case, run with `--env-from-file .env` and put the following in .env: -// TOKEN_DECIMALS=18 -// const DECIMALS = Deno.env.get('TOKEN_DECIMALS') ?? 18; - -export const filter = { - // Only request header if any event matches. - header: { - weak: true, - }, - events: [ - { - fromAddress: - "0x049D36570D4e46f48e99674bd3fcc84644DdD6b96F7C741B1562B82f9e004dC7", - keys: [hash.getSelectorFromName("Transfer")], - includeReceipt: false, - }, - ], -}; - -export function decodeTransfersInBlock({ header, events }) { - const { blockNumber, blockHash, timestamp } = header; - return events.map(({ event, transaction }) => { - const transactionHash = transaction.meta.hash; - const transferId = `${transactionHash}_${event.index}`; - - const [fromAddress, toAddress, amountLow, amountHigh] = event.data; - const amountRaw = uint256.uint256ToBN({ low: amountLow, high: amountHigh }); - const amount = formatUnits(amountRaw, DECIMALS); - - // Convert to snake_case because it works better with postgres. - return { - network: "starknet-goerli", - symbol: "ETH", - block_hash: blockHash, - block_number: +blockNumber, - block_timestamp: timestamp, - transaction_hash: transactionHash, - transfer_id: transferId, - from_address: fromAddress, - to_address: toAddress, - amount: +amount, - amount_raw: amountRaw.toString(), - }; - }); -} diff --git a/examples/console/starknet_to_console.js b/examples/console/starknet_to_console.js deleted file mode 100644 index 5969d3cd..00000000 --- a/examples/console/starknet_to_console.js +++ /dev/null @@ -1,14 +0,0 @@ -import { decodeTransfersInBlock, filter } from "../common/starknet.js"; - -// Configure indexer for streaming Starknet Goerli data starting at the specified block. 
-export const config = { - streamUrl: "https://sepolia.starknet.a5a.ch", - startingBlock: 1_000, - network: "starknet", - filter, - sinkType: "console", - sinkOptions: {}, -}; - -// Transform each block using the function defined in starknet.js. -export default decodeTransfersInBlock; diff --git a/examples/deno.json b/examples/deno.json deleted file mode 100644 index 0967ef42..00000000 --- a/examples/deno.json +++ /dev/null @@ -1 +0,0 @@ -{} diff --git a/examples/mongo/README.md b/examples/mongo/README.md deleted file mode 100644 index 1438619f..00000000 --- a/examples/mongo/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Apibara 🤝 MongoDB - -_Mirror onchain data to a MongoDB collection._ - -**Use cases** - -- Start collecting data quickly, without worrying about table schemas. -- Build the backend for you dapp. -- Create complex analytics queries using the Mongo Pipeline API. - -**Usage** - -You must set the `MONGO_CONNECTION_STRING` environment variable to the one -provided by your MongoDB provider. - -For developing locally, we provide a `docker-compose.yml` file that starts -MongoDB and Mongo Express. Run it with: - -``` -docker compose up -``` - -Then export the following environment variable: - -``` -export MONGO_CONNECTION_STRING='mongodb://mongo:mongo@localhost:27017' -``` - -You can then run the script with `apibara run`. Visit -http://localhost:8081/db/example/transfers to see a list of all documents being -inserted by Apibara into your collection. You can check on the progress by -refreshing the page. 
diff --git a/examples/mongo/docker-compose.yml b/examples/mongo/docker-compose.yml deleted file mode 100644 index c0def6f8..00000000 --- a/examples/mongo/docker-compose.yml +++ /dev/null @@ -1,27 +0,0 @@ -### MongoDB with Mongo Express -version: "3.6" -services: - mongo: - image: mongo:6.0.8 - restart: always - ports: - - "27017:27017" - volumes: - - mongo_data:/data/db - environment: - MONGO_INITDB_ROOT_USERNAME: mongo - MONGO_INITDB_ROOT_PASSWORD: mongo - - mongo-express: - image: mongo-express:latest - restart: always - ports: - - "8081:8081" - environment: - ME_CONFIG_BASICAUTH_USERNAME: mongo - ME_CONFIG_BASICAUTH_PASSWORD: mongo - ME_CONFIG_MONGODB_ADMINUSERNAME: mongo - ME_CONFIG_MONGODB_ADMINPASSWORD: mongo - ME_CONFIG_MONGODB_URL: mongodb://mongo:mongo@mongo:27017/ -volumes: - mongo_data: diff --git a/examples/mongo/starknet_to_mongo.js b/examples/mongo/starknet_to_mongo.js deleted file mode 100644 index 0d2cb837..00000000 --- a/examples/mongo/starknet_to_mongo.js +++ /dev/null @@ -1,26 +0,0 @@ -// Before running this script, you must start a local MongoDB server. -// See README.md for instructions. -import { decodeTransfersInBlock, filter } from "../common/starknet.js"; - -// Configure indexer for streaming Starknet Goerli data starting at the specified block. -export const config = { - streamUrl: "https://sepolia.starknet.a5a.ch", - startingBlock: 53_000, - network: "starknet", - finality: "DATA_STATUS_PENDING", - filter, - sinkType: "mongo", - sinkOptions: { - database: "example", - collectionName: "transfers", - }, - // Restrict invalidate queries to the specified network and symbol. - // This is useful if you are running multiple indexers on the same database. - invalidate: { - network: "starknet-goerli", - symbol: "ETH", - }, -}; - -// Transform each block using the function defined in starknet.js. 
-export default decodeTransfersInBlock; diff --git a/examples/parquet/README.md b/examples/parquet/README.md deleted file mode 100644 index 1256ce34..00000000 --- a/examples/parquet/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# Apibara 🤝 Parquet - -_Create original datasets in one command._ - -**Use cases** - -- Create datasets locally and explore them with Python or SQL (DuckDB). -- Integrate with Big Data tools. -- Aggregate large amount of data quickly. - -**Usage** - -Set the `PARQUET_OUTPUT_DIR` environment variable to the path of the directory -where the integration will write data to. - -``` -mkdir /path/to/my/data -export PARQUET_OUTPUT_DIR=/path/to/my/data -``` - -Run the script with `apibara run`. After the indexer has processed a hundred -blocks, you will start seeing `.parquet` files appearing in your folder. We can -use [DuckDB](https://duckdb.org/) to quickly analyze this data. - -Number of transfer events ingested: - -```sql -select count(1) from "/path/to/my/data/*.parquet"; -``` - -Total transfer amount by block: - -```sql -select - block_number, sum(amount) as total_transfer -from - "/path/to/my/data/*.parquet" -group by - block_number -order by - total_transfer desc; -``` - -You can combine DuckDB with -[Youplot](https://github.com/red-data-tools/YouPlot/) to create beautiful plots -from the command line. 
- -```bash -duckdb -s "copy(select date_trunc('week', block_timestamp::timestamp) as date, sum(amount) as amount from '/tmp/example/*.parquet' group by date_trunc('week', block_timestamp::timestamp) order by amount desc) to '/dev/stdout' with (format 'csv', header)" | uplot bar -d, -t "Top weekly transfer volume" -``` - -![Top weekly transfer volume](https://github.com/apibara/dna/assets/282580/1dc70fd9-3b33-40f6-aab4-921c441ee3ad) diff --git a/examples/parquet/starknet_to_parquet.js b/examples/parquet/starknet_to_parquet.js deleted file mode 100644 index bba80413..00000000 --- a/examples/parquet/starknet_to_parquet.js +++ /dev/null @@ -1,21 +0,0 @@ -// See README.md for instructions. -import { decodeTransfersInBlock, filter } from "../common/starknet.js"; - -// Configure indexer for streaming Starknet Sepolia data starting at the specified block. -export const config = { - streamUrl: "https://sepolia.starknet.a5a.ch", - startingBlock: 1_000, - network: "starknet", - filter, - sinkType: "parquet", - sinkOptions: { - // Files will have data for 100 blocks each. - // In reality, you want this number to be higher (like 1_000), - // but for the sake of this example, we keep it low to generate - // files quickly. - batchSize: 100, - }, -}; - -// Transform each block using the function defined in starknet.js. -export default decodeTransfersInBlock; diff --git a/examples/postgres/README.md b/examples/postgres/README.md deleted file mode 100644 index fd3c2172..00000000 --- a/examples/postgres/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# Apibara 🤝 PostgreSQL - -_Mirror onchain data to a PostgreSQL table._ - -**Use cases** - -- Quickly develop a backend for your dapp by sending data to Supabase or Hasura. -- Build internal dashboards with Retool or Illa. -- Join offchain and onchain data. - -## Usage - -You must set the `POSTGRES_CONNECTION_STRING` environment variable to the one -provided by your PostgreSQL provider. 
- -For developing locally, we provide a `docker-compose.yml` file that -starts Hasura locally. Run it with: - -``` -docker-compose up -``` - -Then export the following environment variable: - -``` -export POSTGRES_CONNECTION_STRING='postgres://postgres:postgres@localhost:5432/postgres' -``` - -Follow -[the steps on the official Hasura -documentation](https://hasura.io/docs/latest/getting-started/docker-simple/#step-2-connect-a-database) -to connect to the database and create the following table (TL;DR: visit -http://localhost:8080 and use `PG_DATABASE_URL` to connect Hasura to -PostgreSQL). - -**Notice**: the `_cursor` column is REQUIRED by Apibara to automatically -invalidate data following chain reorganizations. - -### Standard mode - -```sql -create table transfers( - network text, -- network name, e.g. starknet-goerli - symbol text, -- token symbol, e.g. ETH - block_hash text, -- hex encoded block hash - block_number bigint, - block_timestamp timestamp, - transaction_hash text, -- hex encoded transaction hash - transfer_id text, -- unique transfer id - from_address text, -- address sending the token - to_address text, -- address receiving the token - amount numeric, -- amount as float. Some precision is lost, but we can aggregate it - amount_raw text, -- amount, as bigint - _cursor bigint -- REQUIRED: needed for data invalidation -); -``` - -After creating the `transfers` table, you can run the script using -`apibara run`. Visit http://localhost:8080/console to play around with your new -GraphQL API! For example, run the following query to check the indexing -progress. - -```graphql -query TransferCount { - transfers_aggregate { - aggregate { - count - } - } -} -``` - -**Notice**: some queries may be slow because we haven't created any index yet. -Refer to the -[Hasura documentation to improve query -performance](https://hasura.io/docs/latest/queries/postgres/performance/). 
diff --git a/examples/postgres/docker-compose.yml b/examples/postgres/docker-compose.yml deleted file mode 100644 index 8269aa89..00000000 --- a/examples/postgres/docker-compose.yml +++ /dev/null @@ -1,52 +0,0 @@ -### PostgreSQL with Hasura -## -## https://hasura.io/docs/latest/getting-started/docker-simple/ -version: "3.6" -services: - postgres: - image: postgres:15 - restart: always - ports: - - "5432:5432" - volumes: - - db_data:/var/lib/postgresql/data - environment: - POSTGRES_PASSWORD: postgres - graphql-engine: - image: hasura/graphql-engine:v2.30.0 - ports: - - "8080:8080" - restart: always - environment: - ## postgres database to store Hasura metadata - HASURA_GRAPHQL_METADATA_DATABASE_URL: postgres://postgres:postgres@postgres:5432/postgres - ## this env var can be used to add the above postgres database to Hasura as a data source. this can be removed/updated based on your needs - PG_DATABASE_URL: postgres://postgres:postgres@postgres:5432/postgres - ## enable the console served by server - HASURA_GRAPHQL_ENABLE_CONSOLE: "true" # set to "false" to disable console - ## enable debugging mode. 
It is recommended to disable this in production - HASURA_GRAPHQL_DEV_MODE: "true" - HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup, http-log, webhook-log, websocket-log, query-log - HASURA_GRAPHQL_METADATA_DEFAULTS: '{"backend_configs":{"dataconnector":{"athena":{"uri":"http://data-connector-agent:8081/api/v1/athena"},"mariadb":{"uri":"http://data-connector-agent:8081/api/v1/mariadb"},"mysql8":{"uri":"http://data-connector-agent:8081/api/v1/mysql"},"oracle":{"uri":"http://data-connector-agent:8081/api/v1/oracle"},"snowflake":{"uri":"http://data-connector-agent:8081/api/v1/snowflake"}}}}' - depends_on: - data-connector-agent: - condition: service_healthy - data-connector-agent: - image: hasura/graphql-data-connector:v2.30.0 - restart: always - ports: - - 8081:8081 - environment: - QUARKUS_LOG_LEVEL: ERROR # FATAL, ERROR, WARN, INFO, DEBUG, TRACE - ## https://quarkus.io/guides/opentelemetry#configuration-reference - QUARKUS_OPENTELEMETRY_ENABLED: "false" - ## QUARKUS_OPENTELEMETRY_TRACER_EXPORTER_OTLP_ENDPOINT: http://jaeger:4317 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8081/api/v1/athena/health"] - interval: 5s - timeout: 10s - retries: 5 - start_period: 5s -volumes: - db_data: - diff --git a/examples/postgres/starknet_to_postgres.js b/examples/postgres/starknet_to_postgres.js deleted file mode 100644 index 8b1b91fe..00000000 --- a/examples/postgres/starknet_to_postgres.js +++ /dev/null @@ -1,19 +0,0 @@ -// Before running this script, you must setup your database to include a `transfers` table. -// See README.md for instructions. -import { decodeTransfersInBlock, filter } from "../common/starknet.js"; - -// Configure indexer for streaming Starknet Sepolia data starting at the specified block. 
-export const config = { - streamUrl: "https://sepolia.starknet.a5a.ch", - startingBlock: 1_000, - network: "starknet", - filter, - sinkType: "postgres", - sinkOptions: { - noTls: true, - tableName: "transfers", - }, -}; - -// Transform each block using the function defined in starknet.js. -export default decodeTransfersInBlock; diff --git a/examples/webhook/README.md b/examples/webhook/README.md deleted file mode 100644 index f3653a6d..00000000 --- a/examples/webhook/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# Apibara 🤝 Webhooks - -_Send the result of transforming a batch to a webhook._ - -**Use cases** - -- Serverless indexers using Cloudflare Functions, AWS Lambda, or any serverless - function. -- Start a background job on Inngest or other job queues. -- Push messages into a message queue like Kafka or AWS SNS. - -**Usage** - -You must set the `WEBHOOK_TARGET_URL` environment variable to the target url. -Alternatively, you can update the scripts with your target url in the -`sinkOptions` field. diff --git a/examples/webhook/docker-compose.yml b/examples/webhook/docker-compose.yml deleted file mode 100644 index 5882cd98..00000000 --- a/examples/webhook/docker-compose.yml +++ /dev/null @@ -1,6 +0,0 @@ -version: "3.6" -services: - echo: - image: mendhak/http-https-echo - ports: - - 8080:8080 diff --git a/examples/webhook/starknet_to_webhook.js b/examples/webhook/starknet_to_webhook.js deleted file mode 100644 index 4e3cdbeb..00000000 --- a/examples/webhook/starknet_to_webhook.js +++ /dev/null @@ -1,18 +0,0 @@ -import { decodeTransfersInBlock, filter } from "../common/starknet.js"; - -// Configure indexer for streaming Starknet Sepolia data starting at the specified block. -export const config = { - streamUrl: "https://sepolia.starknet.a5a.ch", - startingBlock: 1_000, - network: "starknet", - filter, - sinkType: "webhook", - sinkOptions: { - // Send data as returned by `transform`. - // When `raw = false`, the data is sent together with the starting and end cursor. 
- raw: true, - }, -}; - -// Transform each block using the function defined in starknet.js. -export default decodeTransfersInBlock; diff --git a/flake.lock b/flake.lock index e012fc41..8258e14f 100644 --- a/flake.lock +++ b/flake.lock @@ -1,22 +1,16 @@ { "nodes": { "crane": { - "inputs": { - "nixpkgs": [ - "nixpkgs" - ] - }, "locked": { - "lastModified": 1710671070, - "narHash": "sha256-XrSUtoeTAw/qiTqqotS1H3d5WANHvXAC+LV0TVIhX0Y=", - "owner": "fracek", + "lastModified": 1727316705, + "narHash": "sha256-/mumx8AQ5xFuCJqxCIOFCHTVlxHkMT21idpbgbm/TIE=", + "owner": "ipetkov", "repo": "crane", - "rev": "5be1e3c6643a9aad66d4f13da0d22416814bbb16", + "rev": "5b03654ce046b5167e7b0bccbd8244cb56c16f0e", "type": "github" }, "original": { - "owner": "fracek", - "ref": "5be1e3c664", + "owner": "ipetkov", "repo": "crane", "type": "github" } @@ -26,29 +20,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_2": { - "inputs": { - "systems": "systems_2" - }, - "locked": { - "lastModified": 1705309234, - "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", + "lastModified": 1726560853, + "narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", + "rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a", "type": "github" }, "original": { @@ -59,16 +35,16 @@ }, "nixpkgs": { "locked": { - "lastModified": 1704290814, - "narHash": "sha256-LWvKHp7kGxk/GEtlrGYV68qIvPHkU9iToomNFGagixU=", + "lastModified": 1727540905, + "narHash": "sha256-40J9tW7Y794J7Uw4GwcAKlMxlX2xISBl6IBigo83ih8=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "70bdadeb94ffc8806c0570eb5c2695ad29f0e421", + 
"rev": "fbca5e745367ae7632731639de5c21f29c8744ed", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.05", + "ref": "nixos-24.05", "repo": "nixpkgs", "type": "github" } @@ -83,17 +59,16 @@ }, "rust-overlay": { "inputs": { - "flake-utils": "flake-utils_2", "nixpkgs": [ "nixpkgs" ] }, "locked": { - "lastModified": 1710382258, - "narHash": "sha256-2FW1q+o34VBweYQiEkRaSEkNMq3ecrn83VzETeGiVbY=", + "lastModified": 1727663505, + "narHash": "sha256-83j/GrHsx8GFUcQofKh+PRPz6pz8sxAsZyT/HCNdey8=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "8ce81e71ab04a7e906fae62da086d6ee5d6cfc21", + "rev": "c2099c6c7599ea1980151b8b6247a8f93e1806ee", "type": "github" }, "original": { @@ -116,21 +91,6 @@ "repo": "default", "type": "github" } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } } }, "root": "root", diff --git a/flake.nix b/flake.nix index 53c4e9f0..cf24adbb 100644 --- a/flake.nix +++ b/flake.nix @@ -2,7 +2,7 @@ description = "Apibara development environment"; inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05"; + nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05"; flake-utils = { url = "github:numtide/flake-utils"; }; @@ -11,17 +11,15 @@ inputs.nixpkgs.follows = "nixpkgs"; }; crane = { - url = "github:fracek/crane/5be1e3c664"; - inputs.nixpkgs.follows = "nixpkgs"; + url = "github:ipetkov/crane"; }; }; - outputs = { self, nixpkgs, rust-overlay, flake-utils, crane, ... }: + outputs = { nixpkgs, rust-overlay, flake-utils, crane, ... 
}: flake-utils.lib.eachDefaultSystem (system: let overlays = [ (import rust-overlay) - (import ./nix/overlay.nix) ]; pkgs = import nixpkgs { @@ -29,86 +27,25 @@ }; crates = { - starknet = { - description = "The Starknet DNA server"; - path = ./starknet; - ports = { - "7171/tcp" = { }; - }; - }; - operator = { - description = "The Apibara Kubernetese Operator"; - path = ./operator; - ports = { - "8118/tcp" = { }; - }; - }; - sink-console = { - description = "Print stream data to the console"; - path = ./sinks/sink-console; - volumes = { - "/data" = { }; - }; - ports = { - "8118/tcp" = { }; - }; - }; - sink-webhook = { - description = "Integration to connect onchain data to HTTP endpoints"; - path = ./sinks/sink-webhook; - volumes = { - "/data" = { }; - }; + dna-beaconchain = { + description = "The Beacon Chain DNA server"; + path = ./beaconchain; ports = { - "8118/tcp" = { }; + "7007/tcp" = { }; }; }; - sink-mongo = { - description = "Integration to populate a MongoDB collection with onchain data"; - path = ./sinks/sink-mongo; - volumes = { - "/data" = { }; - }; + dna-evm = { + description = "The EVM DNA server"; + path = ./evm; ports = { - "8118/tcp" = { }; + "7007/tcp" = { }; }; }; - sink-postgres = { - description = "Integration to populate a PostgreSQL table with onchain data"; - path = ./sinks/sink-postgres; - volumes = { - "/data" = { }; - }; - ports = { - "8118/tcp" = { }; - }; - }; - sink-parquet = { - description = "Integration to generate a Parquet dataset from onchain data"; - path = ./sinks/sink-parquet; - volumes = { - "/data" = { }; - }; - ports = { - "8118/tcp" = { }; - }; - }; - cli = { - description = "Apibara CLI tool"; - path = ./cli; - binaryName = "apibara"; - extraBinaries = [ - "sink-console" - "sink-webhook" - "sink-postgres" - "sink-mongo" - "sink-parquet" - ]; - volumes = { - "/data" = { }; - }; + dna-starknet = { + description = "The Starknet DNA server"; + path = ./starknet; ports = { - "8118/tcp" = { }; + "7007/tcp" = { }; }; }; }; diff 
--git a/install/Dockerfile.test b/install/Dockerfile.test deleted file mode 100644 index 50b490e0..00000000 --- a/install/Dockerfile.test +++ /dev/null @@ -1,9 +0,0 @@ -FROM ubuntu:22.04 - -WORKDIR / -RUN apt-get update && apt-get install -y curl jq gzip -COPY install.sh . -RUN chmod +x install.sh -RUN ./install.sh -# Check installer added installation to path -RUN cat ~/.bashrc | grep ".local/share/apibara/bin" diff --git a/install/README.md b/install/README.md deleted file mode 100644 index 89acdb22..00000000 --- a/install/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Apibara CLI installation script - -This folder contains the installation script for the Apibara CLI. - -## Testing - -Build the `Dockerfile.test` image with the following command: - -```sh -docker build --no-cache -t cli-test -f Dockerfile.test . -``` - -If the build succeeds, the installation script is working. diff --git a/install/install.sh b/install/install.sh deleted file mode 100644 index 43cd03e7..00000000 --- a/install/install.sh +++ /dev/null @@ -1,148 +0,0 @@ -#!/usr/bin/env bash -# shellcheck shell=dash - -set -euo pipefail - - -DATA_DIR="${XDG_DATA_HOME:-$HOME/.local/share}" -APIBARA_ROOT_DIR="${APIBARA_ROOT_DIR:-$DATA_DIR/apibara}" -APIBARA_REPO="${APIBARA_REPO:-apibara/dna}" - -main() { - say "installing Apibara CLI to ${APIBARA_ROOT_DIR}" - need_cmd curl - need_cmd jq - need_cmd gzip - - get_arch || exit 1 - - local _arch="$RETVAL" - assert_nz "$_arch" "arch" - - local _release_tag - _release_tag=$( - curl -s "https://api.github.com/repos/${APIBARA_REPO}/releases?per_page=100" \ - | jq -r '.[] | select((.prerelease==false) and (.tag_name | startswith("cli"))) | .tag_name' \ - | head -n 1 - ) - assert_nz "$_release_tag" "release tag" - - local _release_version="${_release_tag#cli/v}" - assert_nz "$_release_version" "release version" - say "installing CLI version $_release_version for $_arch" - - local 
_release_url="https://github.com/apibara/dna/releases/download/$_release_tag/cli-$_arch.gz" - local _bin_dir="${APIBARA_ROOT_DIR}/bin" - mkdir -p "$_bin_dir" - ensure curl -Ls "$_release_url" > "$_bin_dir/apibara.gz" - ensure gzip -f -d "$_bin_dir/apibara.gz" - ensure chmod +x "$_bin_dir/apibara" - - say "checking installation" - ensure "$_bin_dir/apibara" --version - - say "adding installation to PATH" - local _profile _shell - case "$SHELL" in - */bash) - _profile="$HOME/.bashrc" - _shell="bash" - ;; - */zsh) - _profile="${ZDOTDIR:-$HOME}/.zshenv" - _shell="zsh" - ;; - - */fish) - _profile="$HOME/.config/fish/config.fish" - _shell="fish" - ;; - *) - err "could not detect shell. Add '$_bin_dir' to your PATH" - esac - - say "detected your shell as $_shell." - - # Add only if not already in PATH - # shellcheck disable=SC2035 - if test ":$PATH:" != *":$_bin_dir:"*; then - ensure echo "# Added by the Apibara installer" >> "$_profile" - ensure echo "export PATH=\"\$PATH:$_bin_dir\"" >> "$_profile" - fi - - say "added the installation to your PATH. Run 'source $_profile' or start a new terminal to use apibara" - say "" - say "Documentation: https://www.apibara.com/docs" - say "GitHub: https://github.com/apibara" - say "Twitter: https://www.twitter.com/apibara_web3" - say "Discord: https://discord.gg/m7B92CNFNt" - -} - -get_arch() { - local _ostype _cputype _arch - _ostype="$(uname -s)" - _cputype="$(uname -m)" - - case "$_ostype" in - Linux) - _ostype=linux - ;; - Darwin) - _ostype=macos - ;; - *) - err "unrecognized OS type: $_ostype" - ;; - esac - - case "$_cputype" in - aarch64 | arm64) - _cputype=aarch64 - ;; - x86_64 | x86-64 | x64 | amd64) - _cputype=x86_64 - ;; - *) - err "unsupported CPU type: $_cputype" - ;; - esac - - _arch="${_cputype}-${_ostype}" - - RETVAL="$_arch" -} - -say() { - printf 'apibara-installer: %s\n' "$1" - need_cmd uname -} - -err() { - say "$1" >&2 - exit 1 -} - -need_cmd() { - if ! 
check_cmd "$1"; then - err "command '$1' is required but not available" - fi -} - -check_cmd() { - command -v "$1" > /dev/null 2>&1 -} - -assert_nz() { - if [ -z "$1" ]; then - err "assert_nz failed: $2" - fi -} - -ensure() { - if ! "$@"; then - err "command failed: $*" - fi -} - -main "$@" || exit 1 diff --git a/nix/crates.nix b/nix/crates.nix index 0b59551d..96a10128 100644 --- a/nix/crates.nix +++ b/nix/crates.nix @@ -5,12 +5,15 @@ let extensions = [ "rust-src" "rust-analyzer" ]; }; + nightlyRustToolchain = pkgs.rust-bin.selectLatestNightlyWith (toolchain: toolchain.default); + craneLib = (crane.mkLib pkgs).overrideToolchain rustToolchain; src = pkgs.lib.cleanSourceWith { src = craneLib.path workspaceDir; filter = path: type: (builtins.match ".*proto$" path != null) # include protobufs + || (builtins.match ".*fbs$" path != null) # include flatbuffers || (builtins.match ".*js$" path != null) # include js (for deno runtime) || (craneLib.filterCargoSources path type); # include rust/cargo }; @@ -18,14 +21,22 @@ let buildArgs = ({ nativeBuildInputs = with pkgs; [ cargo-nextest + cargo-flamegraph + # cargo-llvm-cov + cargo-edit + # cargo-udeps + samply clang cmake llvmPackages.libclang.lib + libllvm pkg-config protobuf + flatbuffers rustToolchain - openssl + openssl.dev jq + sqlite ] ++ pkgs.lib.optional stdenv.isDarwin (with pkgs.darwin.apple_sdk.frameworks; [ CoreFoundation CoreServices @@ -33,11 +44,9 @@ let SystemConfiguration ]); - buildInputs = with pkgs; [ - librusty_v8 - ]; + # LLVM_COV = "${pkgs.libllvm}/bin/llvm-cov"; + # LLVM_PROFDATA = "${pkgs.libllvm}/bin/llvm-profdata"; - RUSTY_V8_ARCHIVE = "${pkgs.librusty_v8}/lib/librusty_v8.a"; # used by bindgen LIBCLANG_PATH = pkgs.lib.makeLibraryPath [ pkgs.llvmPackages.libclang.lib @@ -98,7 +107,7 @@ let ''; checkPhaseCargoCommand = '' - cargo nextest archive --cargo-profile $CARGO_PROFILE --archive-format tar-zst --archive-file $out/archive.tar.zst + cargo nextest archive --cargo-profile $CARGO_PROFILE 
--workspace --archive-format tar-zst --archive-file $out/archive.tar.zst ''; }); @@ -322,12 +331,31 @@ in inputsFrom = [ allCrates ]; - - buildInputs = buildArgs.buildInputs ++ [ - pkgs.kubernetes-helm + buildInputs = with pkgs; [ + kubernetes-helm + tokio-console ]; }); + nightly = pkgs.mkShell (buildArgs // { + nativeBuildInputs = with pkgs; [ + nightlyRustToolchain + cargo-udeps + clang + cmake + llvmPackages.libclang.lib + pkg-config + protobuf + flatbuffers + openssl + ] ++ pkgs.lib.optional stdenv.isDarwin (with pkgs.darwin.apple_sdk.frameworks; [ + CoreFoundation + CoreServices + Security + SystemConfiguration + ]); + }); + # Integration tests require an internet connection, which is # not available inside the Nix build environment. # Use the generated nextest archive and run the tests outside of diff --git a/nix/overlay.nix b/nix/overlay.nix deleted file mode 100644 index ad8cff94..00000000 --- a/nix/overlay.nix +++ /dev/null @@ -1,7 +0,0 @@ -final: prev: -{ - # rusty_v8 downloads a prebuilt v8 from github, so we need to prefetch it - # and pass it to the builder. 
- librusty_v8 = prev.callPackage ./packages/librusty_v8.nix { }; -} - diff --git a/nix/packages/librusty_v8.nix b/nix/packages/librusty_v8.nix deleted file mode 100644 index e8de916e..00000000 --- a/nix/packages/librusty_v8.nix +++ /dev/null @@ -1,47 +0,0 @@ -{ rust, stdenv, lib, fetchurl }: - -let - arch = rust.toRustTarget stdenv.hostPlatform; - fetch_librusty_v8 = args: fetchurl { - name = "librusty_v8-${args.version}"; - url = "https://github.com/denoland/rusty_v8/releases/download/v${args.version}/librusty_v8_release_${arch}.a"; - sha256 = args.shas.${stdenv.hostPlatform.system}; - meta = { inherit (args) version; }; - }; -in -stdenv.mkDerivation rec { - pname = "librusty_v8"; - version = "0.81.0"; - - src = fetch_librusty_v8 { - inherit version; - shas = { - x86_64-linux = "sha256-e77LYm/sus7EY4eiRuEp6G25djDaT4wSD4FBCxy4vcE="; - aarch64-linux = "sha256-wPfUcuT2Z2sy5nLf8xR3QjGQKk6OsM/45jnYv/Hw+Zs="; - x86_64-darwin = "sha256-UbnRiywM7b7q3rITZzNeWAuKU+HXXAqVapQ9j5ND6go="; - aarch64-darwin = "sha256-42d3VGBv5lW1InfzYfWr6Xj0GpyJ6GWswVNtUa8ID30="; - }; - }; - - dontUnpack = true; - - installPhase = '' - mkdir -p $out/lib - cp $src $out/lib/librusty_v8.a - - mkdir -p $out/lib/pkgconfig - cat > $out/lib/pkgconfig/rusty_v8.pc << EOF - Name: rusty_v8 - Description: V8 JavaScript Engine - Version: ${version} - Libs: -L $out/lib - EOF - ''; - - meta = with lib; { - description = "Rust bindings for the V8 JavaScript engine"; - homepage = "https://crates.io/crates/v8"; - license = licenses.mit; - platforms = [ "x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" ]; - }; -} diff --git a/node/Cargo.toml b/node/Cargo.toml deleted file mode 100644 index 8fab39d0..00000000 --- a/node/Cargo.toml +++ /dev/null @@ -1,44 +0,0 @@ -[package] -name = "apibara-node" -version = "0.1.0" -edition.workspace = true -authors.workspace = true -repository.workspace = true -license.workspace = true - -[lib] -name = "apibara_node" -path = "src/lib.rs" - -[dependencies] -apibara-core 
= { path = "../core" } -apibara-observability = { path = "../observability" } -arrayvec.workspace = true -async-stream.workspace = true -async-trait.workspace = true -byte-unit.workspace = true -byteorder.workspace = true -dirs.workspace = true -futures.workspace = true -governor.workspace = true -hyper.workspace = true -lazy_static.workspace = true -libmdbx = "0.1.7" -opentelemetry.workspace = true -opentelemetry-otlp.workspace = true -pin-project.workspace = true -prost.workspace = true -thiserror.workspace = true -tokio.workspace = true -tokio-stream.workspace = true -tokio-util.workspace = true -tonic.workspace = true -tower.workspace = true -tracing.workspace = true -tracing-opentelemetry.workspace = true -tracing-subscriber.workspace = true -tracing-tree.workspace = true - -[dev-dependencies] -assert_matches.workspace = true -tempfile.workspace = true diff --git a/node/README.md b/node/README.md deleted file mode 100644 index f5c7d314..00000000 --- a/node/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Apibara Node - -A node combines and transforms multiple input streams into a new output stream. - -## Stream Protocol - -All messages in the Apibara stream protocol are identified by a sequence number. -Clients must check that the messages they receive have increasing sequence -numbers without gaps (except in the case of message invalidation). - -The following messages are part of the protocol: - -- `Data(sequence, data)`: contains the message data together with its sequence - number. -- `Invalidate(sequence)`: informs the client that all messages with sequence - number greater than or equal to the specified `sequence` are now invalid. The - stream will resume by sending the new messages from the specified `sequence` - number. - -Invalidation is needed because web3 data is not finalized immediately. Chain -reorganizations cause blocks that were previously considered canonical to be -removed from the canonical chain. 
By making data invalidation a core part of the -protocol, nodes can push information about chain reorganizations downstream. - -## Node Implementation - -A node is responsible for tracking the state of each input stream across -restarts. It also manages the sequence number generator so that output messages -are correctly sequenced. Finally, the node stores the messages generated by the -stream to persistent storage so that they can be replied to clients that connect -at a later point in time. - -The data transformation and aggregation is performed by applications. -Applications communicate with the node through gRPC and must be started -separately. - -## Source Nodes - -Nodes that ingest data from an outside source and into Apibara are called -_source nodes_. Source nodes can, for example, ingest data from a blockchain -node and generate a stream of Apibara messages. Blockchain nodes can implement -the Apibara stream protocol directly for lower latency. - -Source nodes are not limited to blockchain nodes: an HTTP server can generate a -stream of user actions (e.g. `CommentPosted`, `PostLiked`, `FriendRequestSent`, -etc.) to create applications that mix off-chain and on-chain data. diff --git a/node/src/core.rs b/node/src/core.rs deleted file mode 100644 index f5fa278c..00000000 --- a/node/src/core.rs +++ /dev/null @@ -1,10 +0,0 @@ -use apibara_core::node::v1alpha2::Cursor as ProtoCursor; - -/// A cursor is a position in a stream. -pub trait Cursor: Sized + Default + Clone + std::fmt::Debug { - /// Create a new cursor from a proto cursor. - fn from_proto(cursor: &ProtoCursor) -> Option; - - /// Returns the proto cursor. - fn to_proto(&self) -> ProtoCursor; -} diff --git a/node/src/db/chain_tracker.rs b/node/src/db/chain_tracker.rs deleted file mode 100644 index 4d76c944..00000000 --- a/node/src/db/chain_tracker.rs +++ /dev/null @@ -1,97 +0,0 @@ -//! 
Head tracker related tables - -use std::{io::Cursor, marker::PhantomData}; - -use apibara_core::stream::MessageData; -use byteorder::{BigEndian, ReadBytesExt}; -use prost::Message; - -use super::{table::KeyDecodeError, Table, TableKey}; - -/// A block's hash. -pub trait BlockHash: - Send + Sync + AsRef<[u8]> + Sized + PartialEq + Clone + std::fmt::Debug -{ - fn from_slice(b: &[u8]) -> Result; - fn zero() -> Self; -} - -/// A blockchain block with the associated hash type. -pub trait Block: Send + Sync + MessageData { - type Hash: BlockHash; - - /// Returns the block number or height. - fn number(&self) -> u64; - - /// Returns the block hash. - fn hash(&self) -> &Self::Hash; - - /// Returns the block's parent hash. - fn parent_hash(&self) -> &Self::Hash; -} - -/// Table with the block headers. -#[derive(Debug, Clone, Copy, Default)] -pub struct BlockTable { - phantom: PhantomData, -} - -/// Table with the block hashes of the canonical chain. -#[derive(Debug, Clone, Copy, Default)] -pub struct CanonicalBlockTable { - phantom: PhantomData, -} - -/// Hash of a block belonging to the canonical chain. 
-#[derive(Clone, PartialEq, Message)] -pub struct CanonicalBlock { - #[prost(bytes, tag = "1")] - pub hash: Vec, -} - -impl Table for CanonicalBlockTable -where - H: Send + Sync + BlockHash, -{ - type Key = u64; - type Value = CanonicalBlock; - - fn db_name() -> &'static str { - "CanonicalBlock" - } -} - -impl Table for BlockTable -where - B: Send + Sync + Block, -{ - type Key = (u64, B::Hash); - type Value = B; - - fn db_name() -> &'static str { - "Block" - } -} - -impl TableKey for (u64, H) -where - H: BlockHash, -{ - type Encoded = Vec; - - fn encode(&self) -> Self::Encoded { - let mut out = Vec::new(); - out.extend_from_slice(&self.0.to_be_bytes()); - out.extend_from_slice(self.1.as_ref()); - out - } - - fn decode(b: &[u8]) -> Result { - let mut cursor = Cursor::new(b); - let block_number = cursor - .read_u64::() - .map_err(KeyDecodeError::ReadError)?; - let block_hash = H::from_slice(&b[8..])?; - Ok((block_number, block_hash)) - } -} diff --git a/node/src/db/cli.rs b/node/src/db/cli.rs deleted file mode 100644 index 206ff4df..00000000 --- a/node/src/db/cli.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! # Extensions and functions to manage cli db options - -use std::path::PathBuf; - -/// Returns the path to the local node's data dir. -pub fn default_data_dir() -> Option { - dirs::data_local_dir().map(|d| d.join("apibara")) -} diff --git a/node/src/db/mdbx.rs b/node/src/db/mdbx.rs deleted file mode 100644 index f868a006..00000000 --- a/node/src/db/mdbx.rs +++ /dev/null @@ -1,392 +0,0 @@ -use std::{marker::PhantomData, ops::Range, path::Path}; - -use apibara_core::stream::{MessageData, RawMessageData}; -use libmdbx::{ - Cursor, Database, DatabaseFlags, Environment, EnvironmentBuilder, EnvironmentKind, - Error as MdbxError, Geometry, TableObject, Transaction, TransactionKind, WriteFlags, RW, -}; -use prost::Message; - -use super::{ - table::{Table, TableKey}, - DupSortTable, -}; - -/// A type-safe view over a mdbx database. 
-pub struct MdbxTable<'txn, T, K, E> -where - T: Table, - K: TransactionKind, - E: EnvironmentKind, -{ - txn: &'txn Transaction<'txn, K, E>, - db: Database<'txn>, - phantom: PhantomData, -} - -/// A cursor over items in a `MdbxTable`. -pub struct TableCursor<'txn, T, K> -where - T: Table, - K: TransactionKind, -{ - cursor: Cursor<'txn, K>, - phantom: PhantomData, -} - -/// Result value of any mdbx operation. -pub type MdbxResult = Result; - -/// Configure and open a mdbx environment. -pub struct MdbxEnvironmentBuilder { - env: EnvironmentBuilder, - max_dbs: usize, - geometry: Geometry>, -} - -/// Extension methods over mdbx environment. -pub trait MdbxEnvironmentExt { - /// Open a mdbx environment with the default configuration. - fn open(path: &Path) -> MdbxResult>; - - /// Creates a new mdbx environment builder. - fn builder() -> MdbxEnvironmentBuilder; -} - -/// Extension methods over mdbx RO and RW transactions. -pub trait MdbxTransactionExt { - /// Open a database accessed through a type-safe [MdbxTable]. - fn open_table(&self) -> MdbxResult>; - - /// Shorthand for `open_table()?.cursor()?;` - /// - /// Cannot use `cursor` as name since it's a method on transaction. - fn open_cursor(&self) -> MdbxResult>; -} - -/// Extension methods over mdbx RW transactions. -pub trait MdbxRWTransactionExt { - /// Ensure the given table database exists. Creates it if it doesn't. - fn ensure_table(&self, flags: Option) -> MdbxResult<()>; -} - -impl MdbxEnvironmentExt for Environment { - fn open(path: &Path) -> MdbxResult> { - let mut builder = Environment::new(); - builder.set_max_dbs(16); - builder.open(path) - } - - fn builder() -> MdbxEnvironmentBuilder { - MdbxEnvironmentBuilder::new() - } -} - -impl MdbxEnvironmentBuilder { - /// Create a new environment builder. - pub fn new() -> MdbxEnvironmentBuilder { - let env = Environment::new(); - // set reasonable default geometry. 
- let min_size = byte_unit::n_gib_bytes!(10) as usize; - let max_size = byte_unit::n_gib_bytes!(100) as usize; - let growth_step = byte_unit::n_gib_bytes(2) as isize; - let geometry = Geometry { - size: Some(min_size..max_size), - growth_step: Some(growth_step), - shrink_threshold: None, - page_size: None, - }; - MdbxEnvironmentBuilder { - env, - max_dbs: 100, - geometry, - } - } - - /// Change the database size in GiB. - pub fn with_size_gib(mut self, min_size: usize, max_size: usize) -> Self { - let min_size = byte_unit::n_gib_bytes(min_size as u128) as usize; - let max_size = byte_unit::n_gib_bytes(max_size as u128) as usize; - self.geometry.size = Some(min_size..max_size); - self - } - - /// Change the database growth size in GiB. - pub fn with_growth_step_gib(mut self, step: isize) -> Self { - let step = byte_unit::n_gib_bytes(step as u128) as isize; - self.geometry.growth_step = Some(step); - self - } - - /// Open the environment. - pub fn open(mut self, path: &Path) -> MdbxResult> { - self.env - .set_geometry(self.geometry) - .set_max_dbs(self.max_dbs) - .open(path) - } -} - -impl Default for MdbxEnvironmentBuilder { - fn default() -> Self { - Self::new() - } -} - -impl<'env, K, E> MdbxTransactionExt for Transaction<'env, K, E> -where - K: TransactionKind, - E: EnvironmentKind, -{ - fn open_table(&self) -> MdbxResult> { - let database = self.open_db(Some(T::db_name()))?; - Ok(MdbxTable { - txn: self, - db: database, - phantom: Default::default(), - }) - } - - fn open_cursor(&self) -> MdbxResult> { - self.open_table::()?.cursor() - } -} - -impl<'env, E: EnvironmentKind> MdbxRWTransactionExt for Transaction<'env, RW, E> { - fn ensure_table(&self, flags: Option) -> MdbxResult<()> { - let flags = flags.unwrap_or_default(); - let name = T::db_name(); - self.create_db(Some(name), flags)?; - Ok(()) - } -} - -#[derive(Debug, Clone)] -struct TableObjectWrapper(T); - -impl<'txn, T> TableObject<'txn> for TableObjectWrapper -where - T: Message + Default + Clone, -{ - fn 
decode(data_val: &[u8]) -> MdbxResult - where - Self: Sized, - { - T::decode(data_val) - .map_err(|err| MdbxError::DecodeError(Box::new(err))) - .map(Self) - } -} - -#[derive(Debug, Clone)] -struct RawTableObjectWrapper(RawMessageData); - -impl<'txn, T> TableObject<'txn> for RawTableObjectWrapper -where - T: MessageData, -{ - fn decode(data_val: &[u8]) -> MdbxResult - where - Self: Sized, - { - Ok(Self(RawMessageData::from_vec(data_val.to_vec()))) - } -} - -#[derive(Debug, Clone)] -struct TableKeyWrapper(T); - -impl<'txn, T> TableObject<'txn> for TableKeyWrapper -where - T: TableKey, -{ - fn decode(data_val: &[u8]) -> MdbxResult - where - Self: Sized, - { - T::decode(data_val) - .map_err(|err| MdbxError::DecodeError(Box::new(err))) - .map(Self) - } -} - -impl<'txn, T, K, E> MdbxTable<'txn, T, K, E> -where - T: Table, - K: TransactionKind, - E: EnvironmentKind, -{ - /// Returns a cursor over the items in the table. - pub fn cursor(&self) -> MdbxResult> { - let cursor = self.txn.cursor(&self.db)?; - Ok(TableCursor { - cursor, - phantom: Default::default(), - }) - } - - /// Get an item in the table by its `key`. - pub fn get(&self, key: &T::Key) -> MdbxResult> { - let data = self - .txn - .get::>(&self.db, key.encode().as_ref())?; - Ok(data.map(|d| d.0)) - } -} - -impl<'txn, T, K> TableCursor<'txn, T, K> -where - T: Table, - K: TransactionKind, -{ - /// Get key/data at current cursor position. - pub fn get_current(&mut self) -> MdbxResult> { - map_kv_result::(self.cursor.get_current()) - } - /// Position at the first item. - pub fn first(&mut self) -> MdbxResult> { - map_kv_result::(self.cursor.first()) - } - - /// Position at the last item. - pub fn last(&mut self) -> MdbxResult> { - map_kv_result::(self.cursor.last()) - } - - /// Position at the next item. - #[allow(clippy::should_implement_trait)] - pub fn next(&mut self) -> MdbxResult> { - map_kv_result::(self.cursor.next()) - } - - /// Position at the previous item. 
- pub fn prev(&mut self) -> MdbxResult> { - map_kv_result::(self.cursor.prev()) - } - - /// Position at the specified key. - pub fn seek_exact(&mut self, key: &T::Key) -> MdbxResult> { - map_kv_result::(self.cursor.set_key(key.encode().as_ref())) - } - - /// Position at the specified key and return the raw value . - #[allow(clippy::type_complexity)] - pub fn seek_exact_raw( - &mut self, - key: &T::Key, - ) -> MdbxResult)>> { - raw_map_kv_result::(self.cursor.set_key(key.encode().as_ref())) - } - - /// Position at the first key greater than or equal to the specified key. - pub fn seek_range(&mut self, key: &T::Key) -> MdbxResult> { - map_kv_result::(self.cursor.set_range(key.encode().as_ref())) - } -} - -impl<'txn, T, K> TableCursor<'txn, T, K> -where - T: DupSortTable, - K: TransactionKind, -{ - /// Position at the first item of the current key. - pub fn first_dup(&mut self) -> MdbxResult> { - Ok(self - .cursor - .first_dup::>()? - .map(|d| d.0)) - } - - /// Position at the last item of the current key. - pub fn last_dup(&mut self) -> MdbxResult> { - Ok(self - .cursor - .last_dup::>()? - .map(|d| d.0)) - } - - /// Position at the next item of the current key. - pub fn next_dup(&mut self) -> MdbxResult> { - map_kv_result::(self.cursor.next_dup()) - } - - /// Position at the first item of the next key. - pub fn next_no_dup(&mut self) -> MdbxResult> { - map_kv_result::(self.cursor.next_nodup()) - } - - /// Position at the previous item of the current key. - pub fn prev_dup(&mut self) -> MdbxResult> { - map_kv_result::(self.cursor.prev_dup()) - } - - /// Position at the first item of the previous key. 
- pub fn prev_no_dup(&mut self) -> MdbxResult> { - map_kv_result::(self.cursor.prev_nodup()) - } -} - -impl<'txn, T> TableCursor<'txn, T, RW> -where - T: Table, -{ - pub fn put(&mut self, key: &T::Key, value: &T::Value) -> MdbxResult<()> { - let data = T::Value::encode_to_vec(value); - self.cursor - .put(key.encode().as_ref(), &data, WriteFlags::default())?; - Ok(()) - } - - /// Delete the first cursor/data item. - pub fn del(&mut self) -> MdbxResult<()> { - self.cursor.del(WriteFlags::default()) - } -} - -impl<'txn, T> TableCursor<'txn, T, RW> -where - T: DupSortTable, -{ - pub fn append_dup(&mut self, key: &T::Key, value: &T::Value) -> MdbxResult<()> { - let data = T::Value::encode_to_vec(value); - self.cursor - .put(key.encode().as_ref(), &data, WriteFlags::APPEND_DUP)?; - Ok(()) - } -} - -#[allow(clippy::type_complexity)] -fn map_kv_result( - t: MdbxResult, TableObjectWrapper)>>, -) -> MdbxResult> -where - T: Table, -{ - if let Some((k, v)) = t? { - return Ok(Some((k.0, v.0))); - } - Ok(None) -} - -#[allow(clippy::type_complexity)] -fn raw_map_kv_result( - t: MdbxResult, RawTableObjectWrapper)>>, -) -> MdbxResult)>> -where - T: Table, -{ - if let Some((k, v)) = t? { - return Ok(Some((k.0, v.0))); - } - Ok(None) -} - -pub trait MdbxErrorExt { - fn decode_error(err: E) -> MdbxError; -} - -impl MdbxErrorExt for MdbxError { - fn decode_error(err: E) -> MdbxError { - MdbxError::DecodeError(Box::new(err)) - } -} diff --git a/node/src/db/message_storage.rs b/node/src/db/message_storage.rs deleted file mode 100644 index 9047e100..00000000 --- a/node/src/db/message_storage.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! Message storage related tables. - -use std::marker::PhantomData; - -use apibara_core::stream::Sequence; -use prost::Message; - -use super::Table; - -/// Table with messages by sequence. 
-#[derive(Debug, Clone, Copy, Default)] -pub struct MessageTable { - phantom: PhantomData, -} - -impl Table for MessageTable -where - M: Message + Default + Clone, -{ - type Key = Sequence; - type Value = M; - - fn db_name() -> &'static str { - "Message" - } -} diff --git a/node/src/db/mod.rs b/node/src/db/mod.rs deleted file mode 100644 index e858dbc1..00000000 --- a/node/src/db/mod.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! # Node Database -//! -//! This module provides all the abstractions over storage. -mod chain_tracker; -mod cli; -mod mdbx; -mod message_storage; -mod sequencer; -mod table; - -pub use self::cli::default_data_dir; -pub use self::mdbx::{ - MdbxEnvironmentExt, MdbxErrorExt, MdbxRWTransactionExt, MdbxTable, MdbxTransactionExt, - TableCursor, -}; -pub use self::table::{ByteVec, DupSortTable, KeyDecodeError, Table, TableKey}; - -pub mod tables { - pub use super::chain_tracker::{ - Block, BlockHash, BlockTable, CanonicalBlock, CanonicalBlockTable, - }; - pub use super::message_storage::MessageTable; - pub use super::sequencer::{ - SequencerState, SequencerStateTable, StreamState, StreamStateTable, - }; -} - -pub mod libmdbx { - pub use libmdbx::*; -} diff --git a/node/src/db/sequencer.rs b/node/src/db/sequencer.rs deleted file mode 100644 index 5d1a9d4f..00000000 --- a/node/src/db/sequencer.rs +++ /dev/null @@ -1,128 +0,0 @@ -//! Sequencer-related tables. - -use apibara_core::stream::{Sequence, StreamId}; -use prost::Message; - -use super::Table; - -/// Table with the state of each input sequence, together with the respective -/// output range. -#[derive(Debug, Clone, Copy, Default)] -pub struct SequencerStateTable; - -/// Store the output's start and end sequence for a given stream id and input -/// sequence. -/// -/// Since the output sequence is strictly increasing, we can use the input -/// sequence to order the state and that will also keep the output's sequence -/// ordered. -/// -/// Mark fields as optional to enforce serializing the `0` value. 
-#[derive(Clone, PartialEq, Message)] -pub struct SequencerState { - #[prost(fixed64, optional, tag = "1")] - pub output_sequence_start: Option, - #[prost(fixed64, optional, tag = "2")] - pub output_sequence_end: Option, -} - -impl Table for SequencerStateTable { - type Key = (StreamId, Sequence); - type Value = SequencerState; - - fn db_name() -> &'static str { - "SequencerState" - } -} - -/// Table with the state of each input stream. -#[derive(Debug, Clone, Copy, Default)] -pub struct StreamStateTable; - -/// Contains the most recent sequence number of each input stream. -#[derive(Clone, PartialEq, Message)] -pub struct StreamState { - #[prost(fixed64, optional, tag = "1")] - pub sequence: Option, -} - -impl Table for StreamStateTable { - type Key = StreamId; - type Value = StreamState; - - fn db_name() -> &'static str { - "StreamState" - } -} - -#[cfg(test)] -mod tests { - use apibara_core::stream::{Sequence, StreamId}; - use libmdbx::{Environment, NoWriteMap}; - use tempfile::tempdir; - - use crate::db::{ - sequencer::StreamStateTable, MdbxEnvironmentExt, MdbxRWTransactionExt, MdbxTransactionExt, - }; - - use super::{SequencerState, SequencerStateTable}; - - #[test] - fn test_state_order() { - let path = tempdir().unwrap(); - let db = Environment::::open(path.path()).unwrap(); - let stream_id = StreamId::from_u64(1); - let value_low = SequencerState { - output_sequence_start: Some(0), - output_sequence_end: Some(1), - }; - let value_mid = SequencerState { - output_sequence_start: Some(2), - output_sequence_end: Some(2), - }; - let value_high = SequencerState { - output_sequence_start: Some(3), - output_sequence_end: Some(4), - }; - - let txn = db.begin_rw_txn().unwrap(); - txn.ensure_table::(None).unwrap(); - txn.ensure_table::(None).unwrap(); - let table = txn.open_table::().unwrap(); - let mut cursor = table.cursor().unwrap(); - cursor.first().unwrap(); - // insert three values in the wrong order, then check if they're stored in order. 
- cursor - .put(&(stream_id, Sequence::from_u64(1)), &value_mid) - .unwrap(); - cursor - .put(&(stream_id, Sequence::from_u64(2)), &value_high) - .unwrap(); - cursor - .put(&(stream_id, Sequence::from_u64(0)), &value_low) - .unwrap(); - txn.commit().unwrap(); - - let txn = db.begin_ro_txn().unwrap(); - let table = txn.open_table::().unwrap(); - let mut cursor = table.cursor().unwrap(); - let ((stream_id, input_seq), _value) = cursor - .seek_range(&(stream_id, Sequence::from_u64(0))) - .unwrap() - .unwrap(); - assert_eq!(stream_id.as_u64(), 1); - assert_eq!(input_seq.as_u64(), 0); - - let ((stream_id, input_seq), _value) = cursor.next().unwrap().unwrap(); - assert_eq!(stream_id.as_u64(), 1); - assert_eq!(input_seq.as_u64(), 1); - - let ((stream_id, input_seq), _value) = cursor.next().unwrap().unwrap(); - assert_eq!(stream_id.as_u64(), 1); - assert_eq!(input_seq.as_u64(), 2); - - let value = cursor.next().unwrap(); - assert!(value.is_none()); - txn.commit().unwrap(); - } -} diff --git a/node/src/db/table.rs b/node/src/db/table.rs deleted file mode 100644 index c3e9faf8..00000000 --- a/node/src/db/table.rs +++ /dev/null @@ -1,156 +0,0 @@ -//! Type-safe database access. - -use std::io::Cursor; - -use apibara_core::stream::{Sequence, StreamId}; -use arrayvec::ArrayVec; -use byteorder::{BigEndian, ReadBytesExt}; -use prost::Message; - -/// Error related to decoding keys. -#[derive(Debug, thiserror::Error)] -pub enum KeyDecodeError { - #[error("invalid key bytes size")] - InvalidByteSize { expected: usize, actual: usize }, - #[error("error reading key from bytes")] - ReadError(#[from] std::io::Error), - #[error("Other type of error")] - Other(Box), -} - -/// A fixed-capacity vector of bytes. 
-#[derive(Debug, Clone, Default, PartialEq, Eq, PartialOrd, Ord)] -pub struct ByteVec(ArrayVec); - -pub trait TableKey: Send + Sync + Sized { - type Encoded: AsRef<[u8]> + Send + Sync; - - fn encode(&self) -> Self::Encoded; - fn decode(b: &[u8]) -> Result; -} - -pub trait Table: Send + Sync { - type Key: TableKey; - type Value: Message + Default + Clone; - - fn db_name() -> &'static str; -} - -pub trait DupSortTable: Table {} - -impl AsRef<[u8]> for ByteVec { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl TableKey for StreamId { - type Encoded = [u8; 8]; - - fn encode(&self) -> Self::Encoded { - self.as_u64().to_be_bytes() - } - - fn decode(b: &[u8]) -> Result { - if b.len() != 8 { - return Err(KeyDecodeError::InvalidByteSize { - expected: 8, - actual: b.len(), - }); - } - let mut cursor = Cursor::new(b); - let stream_id = cursor - .read_u64::() - .map_err(KeyDecodeError::ReadError)?; - Ok(StreamId::from_u64(stream_id)) - } -} - -impl TableKey for Sequence { - type Encoded = [u8; 8]; - - fn encode(&self) -> Self::Encoded { - self.as_u64().to_be_bytes() - } - - fn decode(b: &[u8]) -> Result { - if b.len() != 8 { - return Err(KeyDecodeError::InvalidByteSize { - expected: 8, - actual: b.len(), - }); - } - let mut cursor = Cursor::new(b); - let sequence = cursor - .read_u64::() - .map_err(KeyDecodeError::ReadError)?; - Ok(Sequence::from_u64(sequence)) - } -} - -impl TableKey for (StreamId, Sequence) { - type Encoded = [u8; 16]; - - fn encode(&self) -> Self::Encoded { - let mut out = [0; 16]; - out[..8].copy_from_slice(&self.0.encode()); - out[8..].copy_from_slice(&self.1.encode()); - out - } - - fn decode(b: &[u8]) -> Result { - if b.len() != 16 { - return Err(KeyDecodeError::InvalidByteSize { - expected: 16, - actual: b.len(), - }); - } - let mut cursor = Cursor::new(b); - let stream_id = cursor - .read_u64::() - .map_err(KeyDecodeError::ReadError)?; - let sequence = cursor - .read_u64::() - .map_err(KeyDecodeError::ReadError)?; - 
Ok((StreamId::from_u64(stream_id), Sequence::from_u64(sequence))) - } -} - -impl TableKey for () { - type Encoded = [u8; 0]; - - fn encode(&self) -> Self::Encoded { - [] - } - - fn decode(b: &[u8]) -> Result { - if !b.is_empty() { - return Err(KeyDecodeError::InvalidByteSize { - expected: 0, - actual: b.len(), - }); - } - Ok(()) - } -} - -impl TableKey for u64 { - type Encoded = [u8; 8]; - - fn encode(&self) -> Self::Encoded { - self.to_be_bytes() - } - - fn decode(b: &[u8]) -> Result { - if b.len() != 8 { - return Err(KeyDecodeError::InvalidByteSize { - expected: 8, - actual: b.len(), - }); - } - let mut cursor = Cursor::new(b); - cursor - .read_u64::() - .map_err(KeyDecodeError::ReadError) - } -} diff --git a/node/src/lib.rs b/node/src/lib.rs deleted file mode 100644 index 4d620741..00000000 --- a/node/src/lib.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub mod core; -pub mod db; -pub mod message_storage; -pub mod message_stream; -pub mod o11y; -pub mod server; -pub mod stream; - -pub use async_trait::async_trait; diff --git a/node/src/message_storage.rs b/node/src/message_storage.rs deleted file mode 100644 index 2e65a111..00000000 --- a/node/src/message_storage.rs +++ /dev/null @@ -1,295 +0,0 @@ -//! Store messages in mdbx - -use std::{marker::PhantomData, sync::Arc}; - -use apibara_core::stream::{MessageData, RawMessageData, Sequence}; -use libmdbx::{Environment, EnvironmentKind, Error as MdbxError, Transaction, RO, RW}; - -use crate::db::{tables, MdbxRWTransactionExt, MdbxTransactionExt, TableCursor}; - -pub trait MessageStorage { - type Error: std::error::Error + Send + Sync + 'static; - - /// Retrieves a message with the given sequencer number, if any. - fn get( - &self, - sequence: &Sequence, - ) -> std::result::Result>, Self::Error>; -} - -/// Store messages in mdbx. -pub struct MdbxMessageStorage { - db: Arc>, - phantom: PhantomData, -} - -/// [MessageStorage]-related error. 
-#[derive(Debug, thiserror::Error)] -pub enum MdbxMessageStorageError { - #[error("message has the wrong sequence number")] - InvalidMessageSequence { expected: u64, actual: u64 }, - #[error("error originating from database")] - Database(#[from] MdbxError), -} - -pub type Result = std::result::Result; - -pub struct MessageIterator<'txn, E: EnvironmentKind, M: MessageData> { - _txn: Transaction<'txn, RO, E>, - current: Option>, - cursor: TableCursor<'txn, tables::MessageTable, RO>, -} - -impl MdbxMessageStorage -where - E: EnvironmentKind, - M: MessageData, -{ - /// Create a new message store, persisting data to the given mdbx environment. - pub fn new(db: Arc>) -> Result { - let txn = db.begin_rw_txn()?; - txn.ensure_table::>(None)?; - txn.commit()?; - Ok(MdbxMessageStorage { - db, - phantom: PhantomData, - }) - } - - /// Insert the given `message` in the store. - /// - /// Expect `sequence` to be the successor of the current highest sequence number. - pub fn insert(&self, sequence: &Sequence, message: &M) -> Result<()> { - let txn = self.db.begin_rw_txn()?; - self.insert_with_txn(sequence, message, &txn)?; - txn.commit()?; - Ok(()) - } - - /// Same as `insert` but using the given [Transaction]. - pub fn insert_with_txn( - &self, - sequence: &Sequence, - message: &M, - txn: &Transaction, - ) -> Result<()> { - let table = txn.open_table::>()?; - let mut cursor = table.cursor()?; - - match cursor.last()? 
{ - None => { - // First element, assert sequence is 0 - if sequence.as_u64() != 0 { - return Err(MdbxMessageStorageError::InvalidMessageSequence { - expected: 0, - actual: sequence.as_u64(), - }); - } - cursor.put(sequence, message)?; - Ok(()) - } - Some((prev_sequence, _)) => { - if sequence.as_u64() != prev_sequence.as_u64() + 1 { - return Err(MdbxMessageStorageError::InvalidMessageSequence { - expected: prev_sequence.as_u64() + 1, - actual: sequence.as_u64(), - }); - } - cursor.put(sequence, message)?; - Ok(()) - } - } - } - - /// Delete all messages with sequence number greater than or equal the given `sequence`. - /// - /// Returns the number of messages deleted. - pub fn invalidate(&self, sequence: &Sequence) -> Result { - let txn = self.db.begin_rw_txn()?; - let invalidated = self.invalidate_with_txn(sequence, &txn)?; - txn.commit()?; - Ok(invalidated) - } - - /// Same as `invalidate` but using the given [Transaction]. - pub fn invalidate_with_txn( - &self, - sequence: &Sequence, - txn: &Transaction, - ) -> Result { - let table = txn.open_table::>()?; - let mut cursor = table.cursor()?; - - let mut count = 0; - loop { - match cursor.last()? { - None => break, - Some((key, _)) => { - if key.as_u64() < sequence.as_u64() { - break; - } - cursor.del()?; - count += 1; - } - } - } - Ok(count) - } - - /// Returns an iterator over all messages, starting at the given `start` index. 
- pub fn iter_from(&self, start: &Sequence) -> Result> { - let txn = self.db.begin_ro_txn()?; - let table = txn.open_table::>()?; - let mut cursor = table.cursor()?; - let current = cursor.seek_exact(start)?.map(|v| Ok(v.1)); - Ok(MessageIterator { - cursor, - _txn: txn, - current, - }) - } -} - -impl MessageStorage for MdbxMessageStorage -where - E: EnvironmentKind, - M: MessageData, -{ - type Error = MdbxMessageStorageError; - - fn get(&self, sequence: &Sequence) -> Result>> { - let txn = self.db.begin_rw_txn()?; - let table = txn.open_table::>()?; - let mut cursor = table.cursor()?; - let data = cursor.seek_exact_raw(sequence)?.map(|t| t.1); - Ok(data) - } -} - -impl MessageStorage for Arc> -where - E: EnvironmentKind, - M: MessageData, -{ - type Error = MdbxMessageStorageError; - - fn get(&self, sequence: &Sequence) -> Result>> { - let txn = self.db.begin_rw_txn()?; - let table = txn.open_table::>()?; - let mut cursor = table.cursor()?; - let data = cursor.seek_exact_raw(sequence)?.map(|t| t.1); - Ok(data) - } -} - -impl<'txn, E, M> Iterator for MessageIterator<'txn, E, M> -where - E: EnvironmentKind, - M: MessageData, -{ - type Item = Result; - - fn next(&mut self) -> Option { - match self.current.take() { - None => None, - Some(value) => { - self.current = match self.cursor.next() { - Err(err) => Some(Err(err.into())), - Ok(None) => None, - Ok(Some(value)) => Some(Ok(value.1)), - }; - Some(value) - } - } - } -} - -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use apibara_core::stream::Sequence; - use libmdbx::{Environment, NoWriteMap}; - use tempfile::tempdir; - - use crate::db::MdbxEnvironmentExt; - - use super::MdbxMessageStorage; - - #[derive(Clone, PartialEq, prost::Message)] - pub struct Transfer { - #[prost(string, tag = "1")] - pub sender: String, - #[prost(string, tag = "2")] - pub receiver: String, - } - - #[test] - pub fn test_message_storage() { - let path = tempdir().unwrap(); - let db = Environment::::open(path.path()).unwrap(); - let 
storage = MdbxMessageStorage::<_, Transfer>::new(Arc::new(db)).unwrap(); - - // first message must have index 0 - let t0_bad_sequence = Transfer { - sender: "ABC".to_string(), - receiver: "XYZ".to_string(), - }; - assert!(storage - .insert(&Sequence::from_u64(1), &t0_bad_sequence) - .is_err()); - - let t0 = Transfer { - sender: "ABC".to_string(), - receiver: "XYZ".to_string(), - }; - storage.insert(&Sequence::from_u64(0), &t0).unwrap(); - - // next message must have index 1 - let t1 = Transfer { - sender: "AOE".to_string(), - receiver: "TNS".to_string(), - }; - assert!(storage.insert(&Sequence::from_u64(0), &t1).is_err()); - assert!(storage.insert(&Sequence::from_u64(2), &t1).is_err()); - storage.insert(&Sequence::from_u64(1), &t1).unwrap(); - - let all_messages = storage - .iter_from(&Sequence::from_u64(0)) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert!(all_messages.len() == 2); - assert!(all_messages[0] == t0); - assert!(all_messages[1] == t1); - - // invalidate latest message - let count = storage.invalidate(&Sequence::from_u64(1)).unwrap(); - assert!(count == 1); - // second time is a noop - let count = storage.invalidate(&Sequence::from_u64(1)).unwrap(); - assert!(count == 0); - - let all_messages = storage - .iter_from(&Sequence::from_u64(0)) - .unwrap() - .collect::, _>>() - .unwrap(); - assert!(all_messages.len() == 1); - assert!(all_messages[0] == t0); - - // insert value again - assert!(storage.insert(&Sequence::from_u64(0), &t1).is_err()); - assert!(storage.insert(&Sequence::from_u64(2), &t1).is_err()); - storage.insert(&Sequence::from_u64(1), &t1).unwrap(); - - let all_messages = storage - .iter_from(&Sequence::from_u64(1)) - .unwrap() - .collect::, _>>() - .unwrap(); - assert!(all_messages.len() == 1); - assert!(all_messages[0] == t1); - } -} diff --git a/node/src/message_stream.rs b/node/src/message_stream.rs deleted file mode 100644 index 2528a180..00000000 --- a/node/src/message_stream.rs +++ /dev/null @@ -1,653 +0,0 @@ -//! 
Stream messages with backfilling. -//! -//! This object is used to create a continuous stream that -//! seamlessly switches between sending historical messages to -//! sending live messages. -//! It can also start streaming from a sequence number not yet -//! produced: in this case it waits until the live stream reaches -//! the target sequence number. -//! -//! ```txt -//! ┌───────────┐ -//! │ │ -//! Live────► ├───► Backfilled -//! │ │ Stream -//! └─────▲─────┘ -//! │ -//! │ -//! Message -//! Storage -//! ``` - -use std::{collections::VecDeque, marker::PhantomData, pin::Pin, task::Poll, time::Duration}; - -use apibara_core::stream::{MessageData, RawMessageData, Sequence, StreamMessage}; -use futures::Stream; -use pin_project::pin_project; -use tokio::time::Instant; -use tokio_util::sync::CancellationToken; -use tracing::debug; - -use crate::message_storage::MessageStorage; - -pub type LiveStreamItem = std::result::Result, Box>; - -#[pin_project] -pub struct BackfilledMessageStream -where - M: MessageData, - S: MessageStorage, - L: Stream>, -{ - storage: S, - #[pin] - live: L, - // state is in its own struct to play nicely with pin. - state: State, - pending_interval: Option, - ct: CancellationToken, - _phantom: PhantomData, -} - -#[derive(Debug, thiserror::Error)] -pub enum BackfilledMessageStreamError { - #[error("invalid live message sequence number")] - InvalidLiveSequence { expected: u64, actual: u64 }, - #[error("message with sequence {sequence} not found")] - MessageNotFound { sequence: u64 }, - #[error("error retrieving data from message storage")] - Storage(Box), - #[error("error retrieving data from live stream")] - LiveStream(Box), -} - -pub type Result = std::result::Result; - -/// Deadline for sending pending messages. -#[derive(Debug)] -enum PendingDeadline { - /// Don't send any pending messages. - None, - /// Send pending message immediately, ignoring any deadline. - /// Used to send a pending messages immediately after a new data message. 
- Immediately, - /// Send after instant. - Deadline(Instant), -} - -#[derive(Debug)] -struct State { - current: Sequence, - latest: Sequence, - buffer: VecDeque<(Sequence, RawMessageData)>, - pending_deadline: PendingDeadline, - pending_interval: Option, -} - -impl BackfilledMessageStream -where - M: MessageData, - S: MessageStorage, - L: Stream>, -{ - /// Creates a new `MessageStreamer`. - /// - /// Start streaming from the `current` message (inclusive), using `latest` as - /// hint about the most recently stored message. - /// Messages that are not `live` are streamed from the `storage`. - pub fn new( - current: Sequence, - latest: Sequence, - storage: S, - live: L, - pending_interval: Option, - ct: CancellationToken, - ) -> Self { - BackfilledMessageStream { - storage, - live, - state: State::new(current, latest, pending_interval), - pending_interval, - ct, - _phantom: PhantomData, - } - } -} - -impl Stream for BackfilledMessageStream -where - M: MessageData, - S: MessageStorage, - L: Stream>, -{ - type Item = Result>; - - fn poll_next( - self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - // always check cancellation - if self.ct.is_cancelled() { - return Poll::Ready(None); - } - - // when receiving a `StreamMessage::Data` message the stream can perform - // three possible actions, depending on the stream state: - // - // current < latest: - // live stream: used to keep latest updated - // storage: used to read backfilled data and send it to stream - // current == latest: - // live stream: used to keep state updated and send data to stream - // storage: not used - // current > latest: - // live stream: used to keep track of state, but data is not sent - // storage: not used - // - // when receiving a `StreamMessage::Invalidate` message the stream - // can perform: - // - // current < invalidate: - // update latest from invalidate - // current >= invalidate: - // update current and latest from invalidate - // send invalidate message to 
stream - - let current = self.state.current; - let latest = self.state.latest; - - let mut this = self.project(); - - let live_message = { - match Pin::new(&mut this.live).poll_next(cx) { - Poll::Pending => { - // return pending and wake when live stream is ready - if current > latest { - return Poll::Pending; - } - None - } - Poll::Ready(None) => { - // live stream closed, try to keep sending backfilled messages - // but if it's live simply close this stream too. - if current >= latest { - return Poll::Ready(None); - } - None - } - Poll::Ready(Some(message)) => Some(message), - } - }; - - if let Some(message) = live_message { - match message { - Err(err) => { - return Poll::Ready(Some(Err(BackfilledMessageStreamError::LiveStream(err)))) - } - Ok(StreamMessage::Invalidate { sequence }) => { - debug!(sequence = ?sequence, "live invalidate"); - // clear buffer just in case - this.state.clear_buffer(); - this.state.update_latest(sequence); - - // all messages after `sequence` (inclusive) are now invalidated. 
- // forward invalidate message - if current >= sequence { - debug!(sequence = ?sequence, "send live invalidate"); - this.state.update_current(sequence); - let message = StreamMessage::Invalidate { sequence }; - return Poll::Ready(Some(Ok(message))); - } - - // buffer data was invalidated and new state updated - // let's stop here and start again - cx.waker().wake_by_ref(); - return Poll::Pending; - } - Ok(StreamMessage::Data { sequence, data }) => { - debug!(sequence = ?sequence, "live data"); - this.state.update_latest(sequence); - - // just send the message to the stream if it's the current one - if current == sequence { - debug!(sequence = ?sequence, "send live data"); - this.state.update_current(sequence); - this.state.increment_current(); - this.state.reset_pending_deadline_to_immediately(); - let message = StreamMessage::Data { sequence, data }; - return Poll::Ready(Some(Ok(message))); - } - - // no point in adding messages that won't be sent - if current < sequence { - // add message to buffer - this.state.add_live_message(sequence, data); - } - } - Ok(StreamMessage::Pending { sequence, data }) => { - debug!(sequence = ?sequence, "live pending"); - if sequence == current && this.state.is_pending_deadline_exceeded() { - debug!(sequence = ?sequence, "send live pending"); - let message = StreamMessage::Pending { sequence, data }; - this.state.reset_pending_deadline(); - return Poll::Ready(Some(Ok(message))); - } - } - } - } - - // stream is not interested in any messages we can send, so just - // restart and wait for more live data - if current > latest { - cx.waker().wake_by_ref(); - return Poll::Pending; - } - - // prioritize sending from buffer - if this.state.buffer_has_sequence(this.state.current()) { - match this.state.pop_buffer() { - None => { - let sequence = this.state.current().as_u64(); - return Poll::Ready(Some(Err(BackfilledMessageStreamError::MessageNotFound { - sequence, - }))); - } - Some((sequence, data)) => { - 
this.state.increment_current(); - let message = StreamMessage::Data { sequence, data }; - return Poll::Ready(Some(Ok(message))); - } - } - } - - // as last resort, send backfilled messages from storage - match this.storage.get(this.state.current()) { - Err(err) => { - let err = BackfilledMessageStreamError::Storage(Box::new(err)); - Poll::Ready(Some(Err(err))) - } - Ok(None) => { - let sequence = this.state.current().as_u64(); - Poll::Ready(Some(Err(BackfilledMessageStreamError::MessageNotFound { - sequence, - }))) - } - Ok(Some(message)) => { - let sequence = *this.state.current(); - this.state.increment_current(); - let message = StreamMessage::Data { - sequence, - data: message, - }; - Poll::Ready(Some(Ok(message))) - } - } - } -} - -impl State { - fn new(current: Sequence, latest: Sequence, pending_interval: Option) -> Self { - let pending_deadline = pending_interval - .map(|_| PendingDeadline::Immediately) - .unwrap_or(PendingDeadline::None); - State { - current, - latest, - buffer: VecDeque::default(), - pending_interval, - pending_deadline, - } - } - - fn current(&self) -> &Sequence { - &self.current - } - - fn increment_current(&mut self) { - self.current = Sequence::from_u64(self.current.as_u64() + 1); - } - - fn update_latest(&mut self, sequence: Sequence) { - self.latest = sequence; - } - - fn update_current(&mut self, sequence: Sequence) { - self.current = sequence; - } - - fn add_live_message(&mut self, sequence: Sequence, message: RawMessageData) { - self.buffer.push_back((sequence, message)); - - // trim buffer size to always be ~50 elements - while self.buffer.len() > 50 { - self.buffer.pop_front(); - } - } - - fn clear_buffer(&mut self) { - self.buffer.clear(); - } - - fn buffer_has_sequence(&self, sequence: &Sequence) -> bool { - match self.buffer.front() { - None => false, - Some((seq, _)) => seq <= sequence, - } - } - - fn pop_buffer(&mut self) -> Option<(Sequence, RawMessageData)> { - self.buffer.pop_front() - } - - fn 
reset_pending_deadline_to_immediately(&mut self) { - self.pending_deadline = PendingDeadline::Immediately; - } - - fn reset_pending_deadline(&mut self) { - self.pending_deadline = self - .pending_interval - .map(|i| PendingDeadline::Deadline(Instant::now() + i)) - .unwrap_or(PendingDeadline::None); - } - - fn is_pending_deadline_exceeded(&self) -> bool { - match self.pending_deadline { - PendingDeadline::None => false, - PendingDeadline::Immediately => true, - PendingDeadline::Deadline(deadline) => deadline <= Instant::now(), - } - } -} - -#[cfg(test)] -mod tests { - use std::{ - collections::HashMap, - sync::{Arc, Mutex}, - }; - - use apibara_core::stream::{RawMessageData, Sequence, StreamMessage}; - use futures::StreamExt; - use prost::Message; - use tokio::sync::mpsc; - use tokio_stream::wrappers::ReceiverStream; - use tokio_util::sync::CancellationToken; - - use crate::message_storage::MessageStorage; - - use super::BackfilledMessageStream; - - #[derive(Clone, prost::Message)] - pub struct TestMessage { - #[prost(uint64, tag = "1")] - pub sequence: u64, - } - - impl TestMessage { - pub fn new(sequence: u64) -> TestMessage { - TestMessage { sequence } - } - - pub fn new_raw(sequence: u64) -> RawMessageData { - let data = Self::new(sequence).encode_to_vec(); - RawMessageData::from_vec(data) - } - } - - #[derive(Debug, Default)] - pub struct TestMessageStorage { - messages: HashMap>, - } - - #[derive(Debug, thiserror::Error)] - pub enum TestMessageStorageError {} - - impl TestMessageStorage { - pub fn insert(&mut self, sequence: &Sequence, message: &TestMessage) { - let message = RawMessageData::from_vec(message.encode_to_vec()); - self.insert_raw(sequence, message); - } - - pub fn insert_raw(&mut self, sequence: &Sequence, message: RawMessageData) { - self.messages.insert(*sequence, message); - } - } - - impl MessageStorage for Arc> { - type Error = TestMessageStorageError; - - fn get( - &self, - sequence: &Sequence, - ) -> Result>, Self::Error> { - 
Ok(self.lock().unwrap().messages.get(sequence).cloned()) - } - } - - #[tokio::test] - pub async fn test_transition_between_backfilled_and_live() { - let storage = Arc::new(Mutex::new(TestMessageStorage::default())); - - for sequence in 0..10 { - let message = TestMessage::new(sequence); - storage - .lock() - .unwrap() - .insert(&Sequence::from_u64(sequence), &message); - } - - let (live_tx, live_rx) = mpsc::channel(256); - let live_stream = ReceiverStream::new(live_rx); - let ct = CancellationToken::new(); - - let mut stream = BackfilledMessageStream::new( - Sequence::from_u64(0), - Sequence::from_u64(9), - storage.clone(), - live_stream, - None, - ct, - ); - - live_tx - .send(Ok(StreamMessage::new_data( - Sequence::from_u64(10), - TestMessage::new_raw(10), - ))) - .await - .unwrap(); - - // first 10 messages come from storage - for sequence in 0..10 { - let message = stream.next().await.unwrap().unwrap(); - assert_eq!(message.sequence().as_u64(), sequence); - } - - // 11th message from live stream - let message = stream.next().await.unwrap().unwrap(); - assert_eq!(message.sequence().as_u64(), 10); - - // simulate node adding messages to storage (for persistence) while - // publishing to live stream - for sequence in 11..100 { - let message = TestMessage::new_raw(sequence); - let sequence = Sequence::from_u64(sequence); - storage - .lock() - .unwrap() - .insert_raw(&sequence, message.clone()); - let message = StreamMessage::new_data(sequence, message); - live_tx.send(Ok(message)).await.unwrap(); - } - - for sequence in 11..100 { - let message = stream.next().await.unwrap().unwrap(); - assert_eq!(message.sequence().as_u64(), sequence); - } - } - - #[tokio::test] - pub async fn test_start_at_future_sequence() { - let storage = Arc::new(Mutex::new(TestMessageStorage::default())); - - let (live_tx, live_rx) = mpsc::channel(256); - let live_stream = ReceiverStream::new(live_rx); - let ct = CancellationToken::new(); - - let mut stream = BackfilledMessageStream::new( - 
Sequence::from_u64(15), - Sequence::from_u64(9), - storage.clone(), - live_stream, - None, - ct, - ); - - for sequence in 10..20 { - let message = TestMessage::new_raw(sequence); - let sequence = Sequence::from_u64(sequence); - let message = StreamMessage::new_data(sequence, message); - live_tx.send(Ok(message)).await.unwrap(); - } - - for sequence in 15..20 { - let message = stream.next().await.unwrap().unwrap(); - assert_eq!(message.sequence().as_u64(), sequence); - } - } - - #[tokio::test] - pub async fn test_invalidate_data_after_current() { - let storage = Arc::new(Mutex::new(TestMessageStorage::default())); - - let (live_tx, live_rx) = mpsc::channel(256); - let live_stream = ReceiverStream::new(live_rx); - let ct = CancellationToken::new(); - - let mut stream = BackfilledMessageStream::new( - Sequence::from_u64(0), - Sequence::from_u64(9), - storage.clone(), - live_stream, - None, - ct, - ); - - // add some messages to storage - for sequence in 0..5 { - let message = TestMessage::new(sequence); - let sequence = Sequence::from_u64(sequence); - storage.lock().unwrap().insert(&sequence, &message); - } - - // live stream some messages - for sequence in 5..10 { - let message = TestMessage::new_raw(sequence); - let sequence = Sequence::from_u64(sequence); - storage - .lock() - .unwrap() - .insert_raw(&sequence, message.clone()); - let message = StreamMessage::new_data(sequence, message); - live_tx.send(Ok(message)).await.unwrap(); - } - - // invalidate all messages with sequence >= 8 - let sequence = Sequence::from_u64(8); - let message = StreamMessage::new_invalidate(sequence); - live_tx.send(Ok(message)).await.unwrap(); - - // then send some more messages - for sequence in 8..12 { - let message = TestMessage::new_raw(sequence); - let sequence = Sequence::from_u64(sequence); - storage - .lock() - .unwrap() - .insert_raw(&sequence, message.clone()); - let message = StreamMessage::new_data(sequence, message); - live_tx.send(Ok(message)).await.unwrap(); - } - - // 
notice there is no invalidate message because it happened for a - // message sequence that was never streamed - for sequence in 0..12 { - let message = stream.next().await.unwrap().unwrap(); - assert_eq!(message.sequence().as_u64(), sequence); - assert!(message.is_data()); - } - } - - #[tokio::test] - pub async fn test_invalidate_before_current() { - let storage = Arc::new(Mutex::new(TestMessageStorage::default())); - - let (live_tx, live_rx) = mpsc::channel(256); - let live_stream = ReceiverStream::new(live_rx); - let ct = CancellationToken::new(); - - let mut stream = BackfilledMessageStream::new( - Sequence::from_u64(0), - Sequence::from_u64(9), - storage.clone(), - live_stream, - None, - ct, - ); - - // add some messages to storage - for sequence in 0..5 { - let message = TestMessage::new(sequence); - let sequence = Sequence::from_u64(sequence); - storage.lock().unwrap().insert(&sequence, &message); - } - - // live stream some messages - for sequence in 5..10 { - let message = TestMessage::new_raw(sequence); - let sequence = Sequence::from_u64(sequence); - storage - .lock() - .unwrap() - .insert_raw(&sequence, message.clone()); - let message = StreamMessage::new_data(sequence, message); - live_tx.send(Ok(message)).await.unwrap(); - } - - // now stream messages up to 9 - for sequence in 0..10 { - let message = stream.next().await.unwrap().unwrap(); - assert_eq!(message.sequence().as_u64(), sequence); - assert!(message.is_data()); - } - - // invalidate all messages with sequence >= 8 - let sequence = Sequence::from_u64(8); - let message = StreamMessage::new_invalidate(sequence); - live_tx.send(Ok(message)).await.unwrap(); - - // then send some more messages - for sequence in 8..12 { - let message = TestMessage::new_raw(sequence); - let sequence = Sequence::from_u64(sequence); - storage - .lock() - .unwrap() - .insert_raw(&sequence, message.clone()); - let message = StreamMessage::new_data(sequence, message); - live_tx.send(Ok(message)).await.unwrap(); - } - - // 
received invalidate message - let message = stream.next().await.unwrap().unwrap(); - assert_eq!(message.sequence().as_u64(), 8); - assert!(message.is_invalidate()); - - // resume messages - for sequence in 8..12 { - let message = stream.next().await.unwrap().unwrap(); - assert_eq!(message.sequence().as_u64(), sequence); - assert!(message.is_data()); - } - } -} diff --git a/node/src/o11y.rs b/node/src/o11y.rs deleted file mode 100644 index e6e9bfc5..00000000 --- a/node/src/o11y.rs +++ /dev/null @@ -1 +0,0 @@ -pub use apibara_observability::*; diff --git a/node/src/server/metadata.rs b/node/src/server/metadata.rs deleted file mode 100644 index a2e50fd5..00000000 --- a/node/src/server/metadata.rs +++ /dev/null @@ -1,145 +0,0 @@ -use crate::o11y::{self, Counter, KeyValue}; -use tonic::metadata::MetadataMap; -use tracing::{debug_span, Span}; - -pub trait RequestObserver: Send + Sync + 'static { - type Meter: RequestMeter; - - /// Returns a span to be used when tracing a `stream_data` request. - fn stream_data_span(&self, metadata: &MetadataMap) -> Span; - - /// Returns a meter to be used when metering a `stream_data` request. - fn stream_data_meter(&self, metadata: &MetadataMap) -> Self::Meter; -} - -pub trait RequestMeter: Send + Sync + 'static { - /// Increments the counter for the given name by the given amount. - fn increment_counter(&self, name: &'static str, amount: u64); - - /// Increments the counter for the total bytes sent by the given amount. - fn increment_bytes_sent_counter(&self, amount: u64); -} - -/// A [RequestObserver] that adds no context. -#[derive(Debug, Default)] -pub struct SimpleRequestObserver {} - -/// A [RequestMeter] that adds no context. -pub struct SimpleMeter { - counter: Counter, - bytes_sent_counter: Counter, -} - -/// A [RequestObserver] that adds a specific metadata value to the span and meter. -/// -/// This can be used to add information like current user or api keys. 
-pub struct MetadataKeyRequestObserver { - keys: Vec, -} - -/// A [RequestMeter] that adds information about the key used. -pub struct MetadataKeyMeter { - metadata: Vec, - counter: Counter, - bytes_sent_counter: Counter, -} - -impl Default for SimpleMeter { - fn default() -> Self { - let counter = new_data_out_counter(); - let bytes_sent_counter = new_bytes_sent_counter(); - SimpleMeter { - counter, - bytes_sent_counter, - } - } -} - -impl MetadataKeyMeter { - pub fn new(metadata: Vec) -> Self { - let counter = new_data_out_counter(); - let bytes_sent_counter = new_bytes_sent_counter(); - MetadataKeyMeter { - metadata, - counter, - bytes_sent_counter, - } - } -} - -impl MetadataKeyRequestObserver { - pub fn new(keys: Vec) -> Self { - MetadataKeyRequestObserver { keys } - } -} - -impl RequestObserver for SimpleRequestObserver { - type Meter = SimpleMeter; - - fn stream_data_span(&self, _metadata: &MetadataMap) -> Span { - debug_span!("stream_data") - } - - fn stream_data_meter(&self, _metadata: &MetadataMap) -> Self::Meter { - SimpleMeter::default() - } -} - -impl RequestMeter for SimpleMeter { - fn increment_counter(&self, name: &'static str, amount: u64) { - let cx = o11y::Context::current(); - self.counter - .add(&cx, amount, &[KeyValue::new("datum", name)]); - } - - fn increment_bytes_sent_counter(&self, amount: u64) { - let cx = o11y::Context::current(); - self.bytes_sent_counter.add(&cx, amount, &[]); - } -} - -impl RequestObserver for MetadataKeyRequestObserver { - type Meter = MetadataKeyMeter; - - fn stream_data_span(&self, _metadata: &MetadataMap) -> Span { - debug_span!("stream_data") - } - - fn stream_data_meter(&self, metadata: &MetadataMap) -> Self::Meter { - let mut result = Vec::with_capacity(self.keys.len()); - for key in &self.keys { - if let Some(value) = metadata.get(key) { - if let Ok(value) = value.to_str() { - result.push(KeyValue::new(key.clone(), value.to_owned())); - } - } - } - MetadataKeyMeter::new(result) - } -} - -impl RequestMeter for 
MetadataKeyMeter { - fn increment_counter(&self, name: &'static str, amount: u64) { - let cx = o11y::Context::current(); - // Once otel supports default attributes, we can use those instead of - // concatenating the attributes here. - let attributes = &[&[KeyValue::new("datum", name)], self.metadata.as_slice()].concat(); - self.counter.add(&cx, amount, attributes); - } - - fn increment_bytes_sent_counter(&self, amount: u64) { - let cx = o11y::Context::current(); - let attributes = self.metadata.as_slice(); - self.bytes_sent_counter.add(&cx, amount, attributes); - } -} - -fn new_data_out_counter() -> Counter { - let meter = o11y::meter("stream_data"); - meter.u64_counter("data_out").init() -} - -fn new_bytes_sent_counter() -> Counter { - let meter = o11y::meter("stream_data"); - meter.u64_counter("stream_bytes_sent").init() -} diff --git a/node/src/server/mod.rs b/node/src/server/mod.rs deleted file mode 100644 index c3a0730e..00000000 --- a/node/src/server/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -mod metadata; -mod quota; - -pub use self::metadata::{ - MetadataKeyRequestObserver, RequestMeter, RequestObserver, SimpleMeter, SimpleRequestObserver, -}; - -pub use self::quota::{ - QuotaClient, QuotaClientFactory, QuotaConfiguration, QuotaError, QuotaStatus, -}; diff --git a/node/src/server/quota.rs b/node/src/server/quota.rs deleted file mode 100644 index 7db3c252..00000000 --- a/node/src/server/quota.rs +++ /dev/null @@ -1,240 +0,0 @@ -use apibara_core::quota::v1::{ - quota_client::QuotaClient as GrpcQuotaClient, CheckRequest, QuotaStatus as GrpcQuotaStatus, - UpdateAndCheckRequest, -}; -use hyper::Uri; -use tonic::{metadata::MetadataMap, transport::Channel, Request}; -use tracing::debug; - -#[derive(Debug, thiserror::Error)] -pub enum QuotaError { - #[error("missing team metadata key")] - MissingTeamMetadataKey, - #[error("invalid team metadata value")] - InvalidTeamMetadataKey, - #[error("missing client metadata key")] - MissingClientMetadataKey, - #[error("invalid 
client metadata value")] - InvalidClientMetadataKey, - #[error("grpc error: {0}")] - Grpc(#[from] tonic::transport::Error), - #[error("grpc request error: {0}")] - Request(#[from] tonic::Status), -} - -#[derive(Debug, Clone)] -pub enum QuotaStatus { - /// Quota left. - Ok, - /// Quota exceeded. - Exceeded, -} - -impl QuotaStatus { - pub fn is_exceeded(&self) -> bool { - match self { - QuotaStatus::Ok => false, - QuotaStatus::Exceeded => true, - } - } -} - -#[derive(Debug, Clone)] -pub enum QuotaConfiguration { - NoQuota, - RemoteQuota { - /// Network name, used for reporting. - network_name: String, - /// Metadata key used to identify the team. - team_metadata_key: String, - /// Metadata key used to identify the client. - client_metadata_key: Option, - /// Quota server address. - server_address: Uri, - }, -} - -#[derive(Debug, Clone)] -pub struct QuotaClientFactory { - configuration: QuotaConfiguration, -} - -#[derive(Debug, Default, Clone)] -pub struct NoQuotaClient; - -#[derive(Debug, Clone)] -pub struct RemoteQuotaClient { - client: GrpcQuotaClient, - network_name: String, - team_name: String, - client_name: Option, -} - -pub enum QuotaClient { - NoQuotaClient(NoQuotaClient), - RemoteQuotaClient(RemoteQuotaClient), -} - -impl QuotaClientFactory { - pub fn new(configuration: QuotaConfiguration) -> Self { - QuotaClientFactory { configuration } - } - - pub async fn client_with_metadata( - &self, - metadata: &MetadataMap, - ) -> Result { - match &self.configuration { - QuotaConfiguration::NoQuota => Ok(QuotaClient::no_quota()), - QuotaConfiguration::RemoteQuota { - network_name, - team_metadata_key, - client_metadata_key, - server_address, - } => { - let team_name = metadata - .get(team_metadata_key) - .ok_or(QuotaError::MissingTeamMetadataKey)? - .to_str() - .map_err(|_| QuotaError::InvalidTeamMetadataKey)? 
- .to_string(); - - let client_name = if let Some(client_metadata_key) = client_metadata_key { - let value = metadata - .get(client_metadata_key) - .ok_or(QuotaError::MissingClientMetadataKey)? - .to_str() - .map_err(|_| QuotaError::InvalidClientMetadataKey)? - .to_string(); - Some(value) - } else { - None - }; - - let endpoint = Channel::builder(server_address.clone()); - - debug!( - server_address = %server_address, - team_name = %team_name, - client_name = ?client_name, - "using remote quota server" - ); - - let client = GrpcQuotaClient::connect(endpoint).await?; - - Ok(QuotaClient::remote_quota( - client, - team_name, - client_name, - network_name.clone(), - )) - } - } - } -} - -impl QuotaClient { - pub fn no_quota() -> Self { - QuotaClient::NoQuotaClient(NoQuotaClient::new()) - } - - pub fn remote_quota( - client: GrpcQuotaClient, - team_name: String, - client_name: Option, - network_name: String, - ) -> Self { - let inner = RemoteQuotaClient::new(client, team_name, client_name, network_name); - QuotaClient::RemoteQuotaClient(inner) - } - - pub async fn check(&self) -> Result { - match self { - QuotaClient::NoQuotaClient(client) => Ok(client.check()), - QuotaClient::RemoteQuotaClient(client) => Ok(client.check().await?), - } - } - - pub async fn update_and_check(&self, du: u64) -> Result { - match self { - QuotaClient::NoQuotaClient(client) => Ok(client.update_and_check(du)), - QuotaClient::RemoteQuotaClient(client) => Ok(client.update_and_check(du).await?), - } - } -} - -impl NoQuotaClient { - pub fn new() -> Self { - Default::default() - } - - pub fn check(&self) -> QuotaStatus { - QuotaStatus::Ok - } - - pub fn update_and_check(&self, _data_units: u64) -> QuotaStatus { - QuotaStatus::Ok - } -} - -impl RemoteQuotaClient { - pub fn new( - client: GrpcQuotaClient, - team_name: String, - client_name: Option, - network_name: String, - ) -> Self { - RemoteQuotaClient { - client, - network_name, - team_name, - client_name, - } - } - - pub async fn check(&self) -> 
Result { - let request = CheckRequest { - network: self.network_name.clone(), - team_name: self.team_name.clone(), - client_name: self.client_name.clone(), - }; - let request = Request::new(request); - let response = self.client.clone().check(request).await?; - let response = response.into_inner(); - if response.status == GrpcQuotaStatus::Ok as i32 { - Ok(QuotaStatus::Ok) - } else { - Ok(QuotaStatus::Exceeded) - } - } - - pub async fn update_and_check(&self, du: u64) -> Result { - let request = UpdateAndCheckRequest { - network: self.network_name.clone(), - team_name: self.team_name.clone(), - client_name: self.client_name.clone(), - data_units: du, - }; - let request = Request::new(request); - let response = self.client.clone().update_and_check(request).await?; - let response = response.into_inner(); - if response.status == GrpcQuotaStatus::Ok as i32 { - Ok(QuotaStatus::Ok) - } else { - Ok(QuotaStatus::Exceeded) - } - } -} - -impl QuotaError { - pub fn human_readable(&self) -> &'static str { - match &self { - Self::InvalidTeamMetadataKey => "invalid team metadata value", - Self::MissingTeamMetadataKey => "team metadata is required", - Self::InvalidClientMetadataKey => "invalid client metadata value", - Self::MissingClientMetadataKey => "client metadata is required", - Self::Grpc(_) => "quota server error", - _ => "internal", - } - } -} diff --git a/node/src/stream/configuration.rs b/node/src/stream/configuration.rs deleted file mode 100644 index c7ad5a91..00000000 --- a/node/src/stream/configuration.rs +++ /dev/null @@ -1,169 +0,0 @@ -use std::{ - pin::Pin, - task::{self, Poll}, -}; - -use apibara_core::node::v1alpha2::{DataFinality, StreamDataRequest}; -use futures::Stream; -use pin_project::pin_project; -use prost::Message; -use tracing::warn; - -use crate::core::Cursor; - -use super::error::StreamError; - -const MIN_BATCH_SIZE: usize = 1; -const MAX_BATCH_SIZE: usize = 50; -const DEFAULT_BATCH_SIZE: usize = 20; - -#[derive(Default, Clone, Debug)] -pub struct 
StreamConfiguration -where - C: Cursor, - F: Message + Default + Clone, -{ - pub batch_size: usize, - pub stream_id: u64, - pub finality: DataFinality, - pub starting_cursor: Option, - pub filter: Vec, -} - -#[derive(Default)] -struct StreamConfigurationStreamState -where - C: Cursor, - F: Message + Default + Clone, -{ - current: Option>, -} - -#[pin_project] -pub struct StreamConfigurationStream -where - C: Cursor, - F: Message + Default + Clone, - S: Stream>, - E: std::error::Error + Send + Sync + 'static, -{ - #[pin] - inner: S, - state: StreamConfigurationStreamState, -} - -impl StreamConfigurationStream -where - C: Cursor, - F: Message + Default + Clone, - S: Stream>, - E: std::error::Error + Send + Sync + 'static, -{ - pub fn new(inner: S) -> Self { - StreamConfigurationStream { - inner, - state: Default::default(), - } - } -} - -impl StreamConfigurationStreamState -where - C: Cursor, - F: Message + Default + Clone, -{ - fn handle_request( - &mut self, - request: StreamDataRequest, - ) -> Result, StreamError> { - let batch_size = request.batch_size.unwrap_or(DEFAULT_BATCH_SIZE as u64) as usize; - let batch_size = batch_size.clamp(MIN_BATCH_SIZE, MAX_BATCH_SIZE); - - let finality = request - .finality - .and_then(DataFinality::from_i32) - .unwrap_or(DataFinality::DataStatusAccepted); - - let stream_id = request.stream_id.unwrap_or_default(); - - let filter: Vec = if request.filter.is_empty() { - if request.multi_filter.is_empty() { - return Err(StreamError::invalid_request( - "missing filter configuration".to_string(), - )); - } - - if batch_size != 1 { - return Err(StreamError::invalid_request( - "multi-filter configuration is only supported with batch size 1".to_string(), - )); - } - - request - .multi_filter - .iter() - .map(|v| F::decode(v.as_ref())) - .collect::, _>>() - .map_err(|_| { - StreamError::invalid_request("invalid multi-filter configuration".to_string()) - })? 
- } else { - let filter = F::decode(request.filter.as_ref()).map_err(|_| { - StreamError::invalid_request("invalid filter configuration".to_string()) - })?; - - vec![filter] - }; - - let starting_cursor = match request.starting_cursor { - None => None, - Some(starting_cursor) => match C::from_proto(&starting_cursor) { - Some(cursor) => Some(cursor), - None => { - return Err(StreamError::invalid_request( - "invalid starting cursor".to_string(), - )); - } - }, - }; - - let configuration = StreamConfiguration { - batch_size, - finality, - stream_id, - filter, - starting_cursor, - }; - - self.current = Some(configuration.clone()); - - Ok(configuration) - } -} - -impl Stream for StreamConfigurationStream -where - C: Cursor, - F: Message + Default + Clone, - S: Stream>, - E: std::error::Error + Send + Sync + 'static, -{ - type Item = Result, StreamError>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - let this = self.project(); - match this.inner.poll_next(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(None) => Poll::Ready(None), - Poll::Ready(Some(Err(err))) => { - warn!(err = ?err, "configuration stream error"); - let err = Err(StreamError::internal(err)); - Poll::Ready(Some(err)) - } - Poll::Ready(Some(Ok(request))) => { - let result = this.state.handle_request(request); - Poll::Ready(Some(result)) - } - } - } -} diff --git a/node/src/stream/data.rs b/node/src/stream/data.rs deleted file mode 100644 index 6bc67b8b..00000000 --- a/node/src/stream/data.rs +++ /dev/null @@ -1,343 +0,0 @@ -use core::num::NonZeroU32; -use std::time::{Duration, Instant}; - -use apibara_core::node::v1alpha2::{ - stream_data_response, Data, DataFinality, Heartbeat, Invalidate, StreamDataResponse, -}; -use async_stream::stream; -use futures::{stream::FusedStream, Stream, StreamExt}; -use governor::{DefaultDirectRateLimiter, Quota, RateLimiter}; -use prost::Message; -use tracing::{debug_span, instrument, trace, Instrument}; - -use crate::{ - core::Cursor, 
- server::{QuotaClient, QuotaStatus, RequestMeter}, - stream::BatchCursor, -}; - -use super::{ - BatchProducer, CursorProducer, IngestionMessage, IngestionResponse, ReconfigureResponse, - StreamConfiguration, StreamError, -}; - -pub fn new_data_stream( - configuration_stream: impl Stream, StreamError>> + Unpin, - ingestion_stream: impl Stream, StreamError>> + Unpin, - mut cursor_producer: impl CursorProducer + Unpin + FusedStream, - mut batch_producer: impl BatchProducer, - blocks_per_second_quota: u32, - meter: M, - quota_client: QuotaClient, -) -> impl Stream> -where - C: Cursor + Send + Sync, - F: Message + Default + Clone, - B: Message + Default + Clone, - M: RequestMeter, -{ - let mut configuration_stream = configuration_stream.fuse(); - let mut ingestion_stream = ingestion_stream.fuse(); - - let mut limiter = new_rate_limiter(blocks_per_second_quota, 1); - - // try_stream! doesn't work with tokio::select! so we have to use stream! and helper functions. - Box::pin(stream! { - let mut stream_id = 0; - let mut has_configuration = false; - let mut last_batch_sent = Instant::now(); - // Send a batch (no matter if empty or not) at least once every this interval. - let max_batch_interval = Duration::from_secs(10); - - let mut last_quota_sent = Instant::now(); - let quota_interval = Duration::from_secs(15); - - let mut data_units = 0u64; - - match quota_client.check().await.map_err(StreamError::internal)? { - QuotaStatus::Ok => {}, - QuotaStatus::Exceeded => { - yield Err(StreamError::quota_exceeded()); - }, - } - - { - // Some clients (notably tonic) wait for the first message before - // returning the response stream. Since this stream won't produce - // any data until a configuration is sent, it will result in the - // client waiting for the first heartbeat message. - // To avoid this, we send a heartbeat message as soon as possible. 
- use stream_data_response::Message; - yield Ok(StreamDataResponse { - stream_id, - message: Some(Message::Heartbeat(Heartbeat::default())), - }); - } - - loop { - tokio::select! { - // check streams in order. - // always check configuration stream first since any change to configuration will - // change the data being produced. - // then check ingestion messages, this also helps avoid sending data and then - // immediately invalidating it. - // only at the end, produce new data. - biased; - - configuration_message = configuration_stream.select_next_some() => { - has_configuration = true; - match handle_configuration_message(&mut cursor_producer, &mut batch_producer, configuration_message).await { - Ok((new_stream_id, batch_size, configure_response)) => { - stream_id = new_stream_id; - limiter = new_rate_limiter(blocks_per_second_quota, batch_size); - // send invalidate message if the specified cursor is no longer valid. - match configure_response { - ReconfigureResponse::Ok => {}, - ReconfigureResponse::MissingStartingCursor => { - yield Err(StreamError::invalid_request("the specified starting cursor doesn't exist".to_string())); - break; - }, - ReconfigureResponse::Invalidate(cursor) => { - use stream_data_response::Message; - let message = Invalidate { - cursor: Some(cursor.to_proto()), - }; - - yield Ok(StreamDataResponse { - stream_id, - message: Some(Message::Invalidate(message)), - }); - }, - }; - }, - Err(err) => { - yield Err(err); - break; - }, - } - }, - - ingestion_message = ingestion_stream.select_next_some() => { - match handle_ingestion_message(&mut cursor_producer, ingestion_message).await { - Ok(IngestionResponse::Invalidate(cursor)) => { - use stream_data_response::Message; - let message = Invalidate { - cursor: Some(cursor.to_proto()), - }; - - yield Ok(StreamDataResponse { - stream_id, - message: Some(Message::Invalidate(message)), - }); - }, - Ok(IngestionResponse::Ok) => { - // nothing to do. 
- // either message was a new accepted/finalized block, or stream is at - // lower block than invalidated message. - }, - Err(err) => { - yield Err(err); - break; - }, - } - }, - - batch_cursor = cursor_producer.select_next_some(), if has_configuration => { - use stream_data_response::Message; - - match handle_batch_cursor(&mut cursor_producer, &mut batch_producer, batch_cursor, &meter, &limiter).await { - Ok((data, finality)) => { - let should_send_data = - if !data.data.is_empty() || finality == DataFinality::DataStatusAccepted { - true - } else { - last_batch_sent.elapsed() > max_batch_interval - }; - - if !should_send_data { - trace!("skip empty batch"); - continue - } - - data_units += data.data.len() as u64; - - if last_quota_sent.elapsed() > quota_interval { - match quota_client.update_and_check(data_units).await { - Ok(QuotaStatus::Ok) => {}, - Ok(QuotaStatus::Exceeded) => { - yield Err(StreamError::quota_exceeded()); - break; - }, - Err(err) => { - yield Err(StreamError::internal(err)); - break; - } - } - - data_units = 0; - last_quota_sent = Instant::now(); - } - - last_batch_sent = Instant::now(); - yield Ok(StreamDataResponse { - stream_id, - message: Some(Message::Data(data)), - }); - }, - Err(err) => { - yield Err(err); - break; - }, - } - } - } - } - }) -} - -#[instrument(skip_all, level = "debug")] -async fn handle_configuration_message( - cursor_producer: &mut impl CursorProducer, - batch_producer: &mut impl BatchProducer, - configuration_message: Result, StreamError>, -) -> Result<(u64, usize, ReconfigureResponse), StreamError> -where - C: Cursor + Send + Sync, - F: Message + Default + Clone, - B: Message + Default + Clone, -{ - let configuration_message = configuration_message?; - - let cursor_producer_span = debug_span!( - "reconfigure_cursor_producer", - stream_id = configuration_message.stream_id - ); - - let ingestion_response = cursor_producer - .reconfigure(&configuration_message) - .instrument(cursor_producer_span) - .await?; - - let 
batch_producer_span = debug_span!( - "reconfigure_batch_producer", - stream_id = configuration_message.stream_id - ); - batch_producer_span.in_scope(|| batch_producer.reconfigure(&configuration_message))?; - - Ok(( - configuration_message.stream_id, - configuration_message.batch_size, - ingestion_response, - )) -} - -#[instrument(skip_all, level = "debug")] -async fn handle_ingestion_message( - cursor_producer: &mut impl CursorProducer, - ingestion_message: Result, StreamError>, -) -> Result, StreamError> -where - C: Cursor + Send + Sync, - F: Message + Default + Clone, -{ - let ingestion_message = ingestion_message?; - cursor_producer - .handle_ingestion_message(&ingestion_message) - .await -} - -async fn handle_batch_cursor( - _cursor_producer: &mut impl CursorProducer, - batch_producer: &mut impl BatchProducer, - batch_cursor: Result, StreamError>, - meter: &M, - limiter: &DefaultDirectRateLimiter, -) -> Result<(Data, DataFinality), StreamError> -where - C: Cursor + Send + Sync, - F: Message + Default + Clone, - B: Message + Default + Clone, - M: RequestMeter, -{ - let batch_cursor = batch_cursor?; - let (start_cursor, cursors, end_cursor, finality) = match batch_cursor { - BatchCursor::Finalized(start_cursor, cursors) => { - let end_cursor = cursors.last().cloned(); - ( - start_cursor, - cursors, - end_cursor, - DataFinality::DataStatusFinalized, - ) - } - BatchCursor::Accepted(start_cursor, cursor) => ( - start_cursor, - vec![cursor.clone()], - Some(cursor), - DataFinality::DataStatusAccepted, - ), - BatchCursor::Pending(start_cursor, cursor) => ( - start_cursor, - vec![cursor.clone()], - Some(cursor), - DataFinality::DataStatusPending, - ), - }; - - limiter.until_ready().await; - - let handle_batch_span = debug_span!( - "handle_batch", - start_cursor = ?start_cursor, - end_cursor = ?end_cursor, - ); - - async move { - let next_batch_span = debug_span!( - "next_batch", - start_cursor = ?start_cursor, - end_cursor = ?end_cursor, - ); - - let batch = 
batch_producer - .next_batch(cursors.into_iter(), meter) - .instrument(next_batch_span) - .await?; - - let serialize_batch_span = debug_span!( - "serialize_batch", - start_cursor = ?start_cursor, - end_cursor = ?end_cursor, - ); - - let data = serialize_batch_span.in_scope(|| { - batch - .iter() - .map(|block| block.encode_to_vec()) - .collect::>() - }); - - let total_size_bytes = data.iter().map(|block| block.len()).sum::(); - meter.increment_bytes_sent_counter(total_size_bytes as u64); - - let data = Data { - cursor: start_cursor.map(|cursor| cursor.to_proto()), - end_cursor: end_cursor.map(|cursor| cursor.to_proto()), - finality: finality as i32, - data, - }; - - Ok((data, finality)) - } - .instrument(handle_batch_span) - .await -} - -fn new_rate_limiter(blocks_per_second_quota: u32, batch_size: usize) -> DefaultDirectRateLimiter { - // Convert to quota per minute to allow some bursting at the beginning. - let quota_per_minute = - NonZeroU32::new(1 + blocks_per_second_quota * 60 / batch_size as u32).unwrap(); - let quota = Quota::per_minute(quota_per_minute); - - RateLimiter::direct(quota) -} diff --git a/node/src/stream/error.rs b/node/src/stream/error.rs deleted file mode 100644 index 0771bc08..00000000 --- a/node/src/stream/error.rs +++ /dev/null @@ -1,38 +0,0 @@ -use tracing::warn; - -#[derive(Debug, thiserror::Error)] -pub enum StreamError { - #[error("internal error: {0}")] - Internal(Box), - #[error("quota exceeded")] - QuotaExceeded, - #[error("invalid request: {message}")] - InvalidRequest { message: String }, -} - -impl StreamError { - pub fn invalid_request(message: String) -> Self { - StreamError::InvalidRequest { message } - } - - pub fn quota_exceeded() -> Self { - StreamError::QuotaExceeded - } - - pub fn internal(err: impl Into>) -> Self { - StreamError::Internal(err.into()) - } - - pub fn into_status(self) -> tonic::Status { - match self { - StreamError::Internal(err) => { - warn!(err = ?err, "stream error"); - tonic::Status::internal("internal 
server error") - } - StreamError::QuotaExceeded => tonic::Status::resource_exhausted( - "monthly data quota exceeded. Please contact support.", - ), - StreamError::InvalidRequest { message } => tonic::Status::invalid_argument(message), - } - } -} diff --git a/node/src/stream/heartbeat.rs b/node/src/stream/heartbeat.rs deleted file mode 100644 index 0a05f6f7..00000000 --- a/node/src/stream/heartbeat.rs +++ /dev/null @@ -1,116 +0,0 @@ -//! Add heartbeat to streams. - -use std::{ - fmt, - pin::Pin, - task::{self, Poll}, - time::Duration, -}; - -use futures::{stream::Fuse, Future, StreamExt}; -use pin_project::pin_project; -use tokio::time::{Instant, Sleep}; -use tokio_stream::Stream; - -pub trait HeartbeatStreamExt: Stream { - /// Returns a new stream that contains heartbeat messages. - /// - /// Heartbeat messages are produced if the original stream - /// doesn't produce any message for `interval` time. - /// The heartbeat interval is reset every time the stream - /// produces a new message. - fn heartbeat(self, interval: Duration) -> Heartbeat - where - Self: Sized, - { - Heartbeat::new(self, interval) - } -} - -impl HeartbeatStreamExt for St where St: Stream {} - -#[pin_project] -#[must_use = "streams do nothing unless polled"] -#[derive(Debug)] -/// Stream returned by [`heartbeat`](HeartbeatStreamExt::timeout). 
-pub struct Heartbeat { - #[pin] - stream: Fuse, - #[pin] - deadline: Sleep, - interval: Duration, - needs_reset: bool, -} - -impl Heartbeat { - pub fn new(stream: S, interval: Duration) -> Heartbeat { - let stream = stream.fuse(); - let next = Instant::now() + interval; - let deadline = tokio::time::sleep_until(next); - - Heartbeat { - stream, - deadline, - interval, - needs_reset: true, - } - } -} - -impl Stream for Heartbeat { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - let this = self.project(); - - if *this.needs_reset { - let next = Instant::now() + *this.interval; - this.deadline.reset(next); - *this.needs_reset = false; - cx.waker().wake_by_ref(); - return Poll::Pending; - } - - match this.stream.poll_next(cx) { - Poll::Ready(v) => { - if v.is_some() { - *this.needs_reset = true; - } - return Poll::Ready(v.map(Ok)); - } - Poll::Pending => {} - } - - match this.deadline.poll(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(_) => { - *this.needs_reset = true; - Poll::Ready(Some(Err(Beat::new()))) - } - } - } -} - -/// Error returned by `Heartbeat`. -#[derive(Debug, PartialEq)] -pub struct Beat(()); - -impl Beat { - pub(crate) fn new() -> Self { - Beat(()) - } -} - -impl fmt::Display for Beat { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - "heartbeat deadline has elapsed".fmt(fmt) - } -} - -impl std::error::Error for Beat {} - -impl From for std::io::Error { - fn from(_err: Beat) -> std::io::Error { - std::io::ErrorKind::TimedOut.into() - } -} diff --git a/node/src/stream/ingestion.rs b/node/src/stream/ingestion.rs deleted file mode 100644 index ef2d7b9d..00000000 --- a/node/src/stream/ingestion.rs +++ /dev/null @@ -1,16 +0,0 @@ -use crate::core::Cursor; - -/// Events about blocks that are being ingested. -#[derive(Debug, Clone)] -pub enum IngestionMessage { - /// Finalized block ingested. - Finalized(C), - /// Accepted block ingested. - Accepted(C), - /// Pending block ingested. 
- Pending(C), - /// Chain reorganization with root at the given block. - /// Notice that the given root belongs to the new chain - /// and is now the tip of it. - Invalidate(C), -} diff --git a/node/src/stream/mod.rs b/node/src/stream/mod.rs deleted file mode 100644 index b60ccc09..00000000 --- a/node/src/stream/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -mod configuration; -mod data; -mod error; -mod heartbeat; -mod ingestion; -mod producers; -mod response; - -pub use self::configuration::{StreamConfiguration, StreamConfigurationStream}; -pub use self::data::new_data_stream; -pub use self::error::StreamError; -pub use self::heartbeat::Heartbeat; -pub use self::ingestion::IngestionMessage; -pub use self::producers::{ - BatchCursor, BatchProducer, CursorProducer, IngestionResponse, ReconfigureResponse, -}; -pub use self::response::ResponseStream; diff --git a/node/src/stream/producers.rs b/node/src/stream/producers.rs deleted file mode 100644 index 7c0269eb..00000000 --- a/node/src/stream/producers.rs +++ /dev/null @@ -1,141 +0,0 @@ -use async_trait::async_trait; -use futures::Stream; -use prost::Message; - -use super::{configuration::StreamConfiguration, error::StreamError, ingestion::IngestionMessage}; -use crate::{core::Cursor, server::RequestMeter}; - -/// The response to an ingestion message. -#[derive(Debug)] -pub enum IngestionResponse { - /// Invalidate all data after the given cursor. - Invalidate(C), - /// No invalidation is required. - Ok, -} - -/// The response to a call to reconfigure. -#[derive(Debug)] -pub enum ReconfigureResponse { - /// Invalidate all data after the given cursor. - Invalidate(C), - /// No invalidation is required. - Ok, - /// The specified starting cursor doesn't exists. - MissingStartingCursor, -} - -/// A batch cursor. -#[derive(Debug)] -pub enum BatchCursor { - /// A bunch of finalized data. - Finalized(Option, Vec), - /// A single accepted cursor. - Accepted(Option, C), - /// A single pending cursor. 
- Pending(Option, C), -} - -/// An object that produces cursors. -#[async_trait] -pub trait CursorProducer: Stream, StreamError>> { - type Cursor: Cursor; - type Filter: Message + Default + Clone; - - /// Reconfigure the cursor producer. - /// - /// Since the user specifies a starting cursor, this function can signal if the specified - /// cursor has been invalidated while the client was offline. - async fn reconfigure( - &mut self, - configuration: &StreamConfiguration, - ) -> Result, StreamError>; - - /// Handles an ingestion message. - /// - /// This handler should be used to resync the producer's current state with the chain state. - async fn handle_ingestion_message( - &mut self, - message: &IngestionMessage, - ) -> Result, StreamError>; -} - -#[async_trait] -pub trait BatchProducer { - type Cursor: Cursor; - type Filter: Message + Default + Clone; - type Block; - - fn reconfigure( - &mut self, - configuration: &StreamConfiguration, - ) -> Result<(), StreamError>; - - async fn next_batch( - &mut self, - cursors: impl Iterator + Send + Sync, - meter: &M, - ) -> Result, StreamError>; -} - -impl BatchCursor { - /// Creates a new finalized batch cursor. - /// - /// Panics if `cursors` is empty. - pub fn new_finalized(start_cursor: Option, cursors: Vec) -> Self { - BatchCursor::Finalized(start_cursor, cursors) - } - - /// Creates a new accepted batch cursor. - pub fn new_accepted(start_cursor: Option, cursor: C) -> Self { - BatchCursor::Accepted(start_cursor, cursor) - } - - /// Creates a new pending batch cursor. - pub fn new_pending(start_cursor: Option, cursor: C) -> Self { - BatchCursor::Pending(start_cursor, cursor) - } - - /// Returns the start cursor, that is the cursor immediately before the first cursor in the - /// batch. 
- pub fn start_cursor(&self) -> Option<&C> { - match self { - BatchCursor::Finalized(start_cursor, _) => start_cursor.as_ref(), - BatchCursor::Accepted(start_cursor, _) => start_cursor.as_ref(), - BatchCursor::Pending(start_cursor, _) => start_cursor.as_ref(), - } - } - - /// Returns the last cursor in the batch. - pub fn end_cursor(&self) -> &C { - match self { - BatchCursor::Finalized(_, cursors) => cursors.last().expect("empty batch"), - BatchCursor::Accepted(_, ref cursor) => cursor, - BatchCursor::Pending(_, ref cursor) => cursor, - } - } - - /// Returns the finalized cursors. - pub fn as_finalized(&self) -> Option<&[C]> { - match self { - BatchCursor::Finalized(_, ref cursors) => Some(cursors), - _ => None, - } - } - - /// Returns the accepted cursor. - pub fn as_accepted(&self) -> Option<&C> { - match self { - BatchCursor::Accepted(_, ref cursor) => Some(cursor), - _ => None, - } - } - - /// Returns the pending cursor. - pub fn as_pending(&self) -> Option<&C> { - match self { - BatchCursor::Pending(_, ref cursor) => Some(cursor), - _ => None, - } - } -} diff --git a/node/src/stream/response.rs b/node/src/stream/response.rs deleted file mode 100644 index 016e51ec..00000000 --- a/node/src/stream/response.rs +++ /dev/null @@ -1,65 +0,0 @@ -use std::{ - pin::Pin, - task::{self, Poll}, - time::Duration, -}; - -use apibara_core::node::v1alpha2::StreamDataResponse; -use futures::Stream; -use pin_project::pin_project; - -use super::{error::StreamError, heartbeat::Heartbeat}; - -#[pin_project] -pub struct ResponseStream -where - S: Stream>, -{ - #[pin] - inner: Heartbeat, -} - -impl ResponseStream -where - S: Stream>, -{ - pub fn new(inner: S) -> Self { - let inner = Heartbeat::new(inner, Duration::from_secs(30)); - ResponseStream { inner } - } -} - -impl Stream for ResponseStream -where - S: Stream> + Unpin, -{ - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - let this = self.project(); - match 
this.inner.poll_next(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(None) => Poll::Ready(None), - Poll::Ready(Some(value)) => { - let response = match value { - Err(_) => { - // heartbeat - use apibara_core::node::v1alpha2::{ - stream_data_response::Message, Heartbeat, - }; - - // stream_id is not relevant for heartbeat messages - let response = StreamDataResponse { - stream_id: 0, - message: Some(Message::Heartbeat(Heartbeat {})), - }; - Ok(response) - } - Ok(Err(err)) => Err(err.into_status()), - Ok(Ok(response)) => Ok(response), - }; - Poll::Ready(Some(response)) - } - } - } -} diff --git a/observability/Cargo.toml b/observability/Cargo.toml index b48cb963..5d8e96cf 100644 --- a/observability/Cargo.toml +++ b/observability/Cargo.toml @@ -8,9 +8,11 @@ license.workspace = true [dependencies] error-stack.workspace = true +nu-ansi-term = "0.50.1" opentelemetry.workspace = true opentelemetry-otlp.workspace = true +opentelemetry_sdk.workspace = true +time.workspace = true tracing.workspace = true tracing-opentelemetry.workspace = true tracing-subscriber.workspace = true -tracing-tree.workspace = true diff --git a/observability/src/dna_fmt.rs b/observability/src/dna_fmt.rs new file mode 100644 index 00000000..e79a485e --- /dev/null +++ b/observability/src/dna_fmt.rs @@ -0,0 +1,229 @@ +use std::{fmt, io}; + +use nu_ansi_term::{Color, Style}; +use tracing::{field, span, Event, Level, Subscriber}; + +use tracing_subscriber::field::{VisitFmt, VisitOutput}; +use tracing_subscriber::fmt::format::Writer; +use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields}; +use tracing_subscriber::{prelude::*, registry::LookupSpan}; + +pub struct DnaFormat { + time_format: time::format_description::OwnedFormatItem, +} + +impl FormatEvent for DnaFormat +where + S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'a> FormatFields<'a> + 'static, +{ + fn format_event( + &self, + ctx: &FmtContext<'_, S, N>, + mut writer: Writer<'_>, + event: &Event<'_>, + ) -> 
std::fmt::Result { + let meta = event.metadata(); + + write!( + writer, + "{}", + FmtLevel::new(meta.level(), writer.has_ansi_escapes()) + )?; + writer.write_char(' ')?; + if self.format_time(&mut writer).is_err() { + write!(writer, "[]")?; + }; + writer.write_char(' ')?; + + ctx.format_fields(writer.by_ref(), event)?; + + writeln!(writer) + } +} + +impl<'w> FormatFields<'w> for DnaFormat { + fn format_fields( + &self, + writer: Writer<'w>, + fields: R, + ) -> fmt::Result { + let mut v = DnaFormatVisitor::new(writer, true); + fields.record(&mut v); + v.finish() + } + + fn add_fields( + &self, + current: &'w mut tracing_subscriber::fmt::FormattedFields, + fields: &span::Record<'_>, + ) -> fmt::Result { + let empty = current.is_empty(); + let writer = current.as_writer(); + let mut v = DnaFormatVisitor::new(writer, empty); + fields.record(&mut v); + v.finish() + } +} + +struct DnaFormatVisitor<'a> { + writer: Writer<'a>, + is_empty: bool, + style: Style, + result: std::fmt::Result, +} + +impl<'a> DnaFormatVisitor<'a> { + fn new(writer: Writer<'a>, is_empty: bool) -> Self { + Self { + writer, + is_empty, + style: Style::new(), + result: Ok(()), + } + } + + fn write_padded(&mut self, v: &impl fmt::Debug) { + let padding = if self.is_empty { + self.is_empty = false; + "" + } else { + " " + }; + + self.result = write!(self.writer, "{}{:?}", padding, v); + } +} + +impl<'a> field::Visit for DnaFormatVisitor<'a> { + fn record_str(&mut self, field: &field::Field, value: &str) { + if self.result.is_err() { + return; + } + + if field.name() == "message" { + self.record_debug(field, &format_args!("{:0<60}", value)) + } else { + self.record_debug(field, &value) + } + } + + fn record_debug(&mut self, field: &field::Field, value: &dyn fmt::Debug) { + if self.result.is_err() { + return; + } + + let value = format!("{:?}", value); + match field.name() { + "message" => { + self.write_padded(&format_args!("{}{:<40}", self.style.prefix(), value)); + } + name => { + let color = if 
field.name() == "error" { + Color::Red + } else { + Color::Blue + }; + + if self.writer.has_ansi_escapes() { + self.write_padded(&format_args!( + "{}{}={}", + self.style.prefix(), + name, + color.paint(value) + )); + } else { + self.write_padded(&format_args!("{}{}={}", self.style.prefix(), name, value)); + } + } + } + } +} + +impl<'a> VisitOutput for DnaFormatVisitor<'a> { + fn finish(mut self) -> std::fmt::Result { + write!(&mut self.writer, "{}", self.style.suffix())?; + self.result + } +} + +impl<'a> VisitFmt for DnaFormatVisitor<'a> { + fn writer(&mut self) -> &mut dyn fmt::Write { + &mut self.writer + } +} + +impl DnaFormat { + pub fn format_time(&self, writer: &mut Writer<'_>) -> std::fmt::Result { + let now = time::OffsetDateTime::from(std::time::SystemTime::now()); + let mut w = WriteAdaptor { fmt_writer: writer }; + now.format_into(&mut w, &self.time_format) + .map_err(|_| std::fmt::Error)?; + Ok(()) + } +} + +struct FmtLevel<'a> { + level: &'a Level, + ansi: bool, +} + +struct WriteAdaptor<'a> { + fmt_writer: &'a mut dyn fmt::Write, +} + +impl<'a> FmtLevel<'a> { + fn new(level: &'a Level, ansi: bool) -> Self { + Self { level, ansi } + } +} + +impl<'a> std::fmt::Display for FmtLevel<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.ansi { + match *self.level { + Level::TRACE => write!(f, "[{}]", Color::Purple.paint("TRACE")), + Level::DEBUG => write!(f, "[{}]", Color::Blue.paint("DEBUG")), + Level::INFO => write!(f, "[{}]", Color::Green.paint("INFO")), + Level::WARN => write!(f, "[{}]", Color::Yellow.paint("WARN")), + Level::ERROR => write!(f, "[{}]", Color::Red.paint("ERROR")), + } + } else { + match *self.level { + Level::TRACE => write!(f, "[TRACE]"), + Level::DEBUG => write!(f, "[DEBUG]"), + Level::INFO => write!(f, "[INFO]"), + Level::WARN => write!(f, "[WARN]"), + Level::ERROR => write!(f, "[ERROR]"), + } + } + } +} + +impl<'a> io::Write for WriteAdaptor<'a> { + fn write(&mut self, buf: &[u8]) -> std::io::Result 
{ + let s = + std::str::from_utf8(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + self.fmt_writer + .write_str(s) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + + Ok(s.as_bytes().len()) + } + + fn flush(&mut self) -> std::io::Result<()> { + Ok(()) + } +} + +impl Default for DnaFormat { + fn default() -> Self { + let time_format = time::format_description::parse_owned::<2>( + r#"\[[month]-[day]|[hour]:[minute]:[second].[subsecond digits:3]\]"#, + ) + .expect("failed to parse time format"); + + Self { time_format } + } +} diff --git a/observability/src/lib.rs b/observability/src/lib.rs index 32703993..424b1397 100644 --- a/observability/src/lib.rs +++ b/observability/src/lib.rs @@ -1,21 +1,19 @@ //! # OpenTelemetry helpers -use std::{env, fmt}; +mod dna_fmt; + +use std::borrow::Cow; use error_stack::{Result, ResultExt}; -use opentelemetry::{ - global, - sdk::{ - self, export::metrics::aggregation::cumulative_temporality_selector, metrics::selectors, - Resource, - }, -}; -use opentelemetry_otlp::WithExportConfig; +use opentelemetry::global; +use opentelemetry::trace::TracerProvider; use tracing::Subscriber; pub use opentelemetry::metrics::{ObservableCounter, ObservableGauge}; +pub use opentelemetry::trace::{SpanContext, TraceContextExt}; pub use opentelemetry::{Context, Key, KeyValue}; use tracing_opentelemetry::MetricsLayer; +pub use tracing_opentelemetry::OpenTelemetrySpanExt; use tracing_subscriber::{prelude::*, registry::LookupSpan, EnvFilter, Layer}; pub use opentelemetry::metrics::{Counter, Meter}; @@ -28,8 +26,8 @@ pub type BoxedLayer = Box + Send + Sync>; pub struct OpenTelemetryInitError; impl error_stack::Context for OpenTelemetryInitError {} -impl fmt::Display for OpenTelemetryInitError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +impl std::fmt::Display for OpenTelemetryInitError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("failed to initialize opentelemetry") } } 
@@ -38,31 +36,49 @@ pub fn meter(name: &'static str) -> Meter { global::meter(name) } -pub fn init_opentelemetry() -> Result<(), OpenTelemetryInitError> { - // The otel sdk doesn't follow the disabled env variable flag. - // so we manually implement it to disable otel exports. - // we diverge from the spec by defaulting to disabled. - let sdk_disabled = env::var(OTEL_SDK_DISABLED) - .map(|v| v == "true") - .unwrap_or(true); - - if std::env::var("RUST_LOG").is_err() { - std::env::set_var("RUST_LOG", "info"); +/// Initialize OpenTelemetry. +/// +/// This function initializes the OpenTelemetry SDK and sets up the tracing and metrics layers. +/// It should be called once during the application startup. +/// +/// ```rs +/// use apibara_observability::init_opentelemetry; +/// +/// init_opentelemetry(env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")).unwrap(); +/// ``` +pub fn init_opentelemetry( + package_name: impl Into>, + package_version: impl Into>, +) -> Result<(), OpenTelemetryInitError> { + { + // The otel sdk doesn't follow the disabled env variable flag. + // so we manually implement it to disable otel exports. + // we diverge from the spec by defaulting to disabled. 
+ let sdk_disabled = std::env::var(OTEL_SDK_DISABLED) + .map(|v| v == "true") + .unwrap_or(true); + + if std::env::var("RUST_LOG").is_err() { + std::env::set_var("RUST_LOG", "info"); + } + + let mut layers = vec![stdout()]; + + if !sdk_disabled { + let otel_layer = otel(package_name, package_version)?; + layers.push(otel_layer); + } + + tracing_subscriber::registry().with(layers).init(); } - let mut layers = vec![stdout()]; - - if !sdk_disabled { - let otel_layer = otel()?; - layers.push(otel_layer); - } - - tracing_subscriber::registry().with(layers).init(); - Ok(()) } -fn otel() -> Result, OpenTelemetryInitError> +fn otel( + package_name: impl Into>, + version: impl Into>, +) -> Result, OpenTelemetryInitError> where S: Subscriber + Send + Sync, for<'a> S: LookupSpan<'a>, @@ -73,25 +89,24 @@ where // Both tracer and meter are configured with environment variables. let meter = opentelemetry_otlp::new_pipeline() - .metrics( - selectors::simple::inexpensive(), - cumulative_temporality_selector(), - opentelemetry::runtime::Tokio, - ) - .with_exporter(opentelemetry_otlp::new_exporter().tonic().with_env()) - .with_resource(Resource::default()) + .metrics(opentelemetry_sdk::runtime::Tokio) + .with_exporter(opentelemetry_otlp::new_exporter().tonic()) .build() .change_context(OpenTelemetryInitError) .attach_printable("failed to create metrics pipeline")?; - let tracer = opentelemetry_otlp::new_pipeline() + let trace_provider = opentelemetry_otlp::new_pipeline() .tracing() - .with_exporter(opentelemetry_otlp::new_exporter().tonic().with_env()) - .with_trace_config(sdk::trace::config().with_resource(Resource::default())) - .install_batch(opentelemetry::runtime::Tokio) + .with_exporter(opentelemetry_otlp::new_exporter().tonic()) + .install_batch(opentelemetry_sdk::runtime::Tokio) .change_context(OpenTelemetryInitError) .attach_printable("failed to create tracing pipeline")?; + let tracer = trace_provider + .tracer_builder(package_name) + .with_version(version) + .build(); + 
// export traces and metrics to otel let otel_trace_layer = tracing_opentelemetry::layer().with_tracer(tracer); let otel_metrics_layer = MetricsLayer::new(meter); @@ -124,7 +139,8 @@ where } else { tracing_subscriber::fmt::layer() .with_ansi(true) - .with_target(false) + .event_format(dna_fmt::DnaFormat::default()) + .fmt_fields(dna_fmt::DnaFormat::default()) .with_filter(log_env_filter) .boxed() } diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md deleted file mode 100644 index eaa11d08..00000000 --- a/operator/CHANGELOG.md +++ /dev/null @@ -1,38 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Common Changelog](https://common-changelog.org/), and -this project adheres to -[Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [0.2.2] - 2023-12-12 - -_Fix Indexer CRD._ - -### Fixed - -- Fix capitalization in the `Indexer` CRD. - -## [0.2.1] - 2023-12-12 - -_Limit operator to watch a single namespace._ - -### Added - -- Add a new `--namespace` flag to limit watching indexers in a single namespace. - -## [0.2.0] - 2023-12-11 - -_Add support for private GitHub repositories._ - -### Changed - -- Change the GitHub indexer source to allow cloning private repositories. - Users should create a private access token (PAT) and store it in a secret. Use - the secret together with the `access_token_env_var` to authenticate with GitHub - on clone. 
- -[0.2.2]: https://github.com/apibara/dna/releases/tag/operator/v0.2.2 -[0.2.1]: https://github.com/apibara/dna/releases/tag/operator/v0.2.1 -[0.2.0]: https://github.com/apibara/dna/releases/tag/operator/v0.2.0 diff --git a/operator/Cargo.toml b/operator/Cargo.toml deleted file mode 100644 index e45f0609..00000000 --- a/operator/Cargo.toml +++ /dev/null @@ -1,38 +0,0 @@ -[package] -name = "apibara-operator" -version = "0.2.2" -edition.workspace = true - -[lib] -name = "apibara_operator" -path = "src/lib.rs" - -[[bin]] -name = "apibara-operator" -path = "src/bin.rs" - -[features] -default = ["operator"] -operator = [] - -[dependencies] -apibara-observability = { path = "../observability" } -error-stack.workspace = true -clap.workspace = true -ctrlc.workspace = true -futures.workspace = true -k8s-openapi = { version = "0.18.0", features = ["v1_26", "api", "schemars"] } -kube = { version = "0.83.0", features = [ - "client", - "derive", - "runtime", - "rustls-tls", -], default-features = false } -schemars = "0.8.12" -serde.workspace = true -serde_json.workspace = true -serde_yaml = "0.9.22" -tokio.workspace = true -tokio-stream.workspace = true -tokio-util.workspace = true -tracing.workspace = true diff --git a/operator/examples/README.md b/operator/examples/README.md deleted file mode 100644 index 2527f542..00000000 --- a/operator/examples/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# Apibara Operator Examples - -This folder contains example manifests that you can use to test your Apibara -Operator installation. - -### Getting Started - -If you're deploying integrations that use data from the hosted streams, you must -configure your API Key in `apikey.yaml`. 
- -Change the value of the `production` key to your key, then deploy it with: - -```sh -kubectl apply -f apikey.yaml -``` - diff --git a/operator/examples/apikey.yaml b/operator/examples/apikey.yaml deleted file mode 100644 index c7b8588a..00000000 --- a/operator/examples/apikey.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - namespace: default - name: apibara-api-key -stringData: - production: dna_XXX - diff --git a/operator/examples/console.yml b/operator/examples/console.yml deleted file mode 100644 index d38bd882..00000000 --- a/operator/examples/console.yml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: apibara.com/v1alpha2 -kind: Indexer -metadata: - namespace: default - name: console -spec: - source: - gitHub: - repo: dna - owner: apibara - revision: main - subpath: examples/console - sink: - script: starknet_to_console.js - type: console - env: - - name: AUTH_TOKEN - valueFrom: - secretKeyRef: - name: apibara-api-key - key: production diff --git a/operator/src/bin.rs b/operator/src/bin.rs deleted file mode 100644 index 7cf83690..00000000 --- a/operator/src/bin.rs +++ /dev/null @@ -1,139 +0,0 @@ -use std::collections::HashMap; - -use apibara_observability::init_opentelemetry; -use apibara_operator::{ - configuration::{Configuration, SinkConfiguration}, - controller, - crd::Indexer, - error::OperatorError, -}; -use clap::{Args, Parser, Subcommand}; -use error_stack::{Result, ResultExt}; -use kube::{Client, CustomResourceExt}; -use tokio_util::sync::CancellationToken; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None)] -struct Cli { - #[command(subcommand)] - command: Command, -} - -#[derive(Subcommand, Debug)] -enum Command { - /// Generate the operator CRDs and exit. - GenerateCrd(GenerateCrdArgs), - /// Start the operator. 
- Start(StartArgs), -} - -#[derive(Args, Debug)] -struct GenerateCrdArgs {} - -#[derive(Args, Debug)] -struct StartArgs { - #[clap(flatten)] - pub sink: SinkArgs, - /// Limit the namespace the operator watches. - #[arg(long, env)] - pub namespace: Option, -} - -#[derive(Args, Debug)] -struct SinkArgs { - /// Sink type to image mapping. - /// - /// Values are separated by commas, - /// e.g. `console=quay.io/apibara/sink-console:latest,mongo=quay.io/apibara/sink-mongo:latest`. - #[arg(long, env, value_delimiter = ',')] - pub sink_images: Option>, -} - -fn generate_crds(_args: GenerateCrdArgs) -> Result<(), OperatorError> { - let crds = [Indexer::crd()] - .iter() - .map(|crd| serde_yaml::to_string(&crd)) - .collect::, _>>() - .change_context(OperatorError) - .attach_printable("failed to serialize CRD to yaml")? - .join("---\n"); - println!("{}", crds); - Ok(()) -} - -async fn start(args: StartArgs) -> Result<(), OperatorError> { - let client = Client::try_default() - .await - .change_context(OperatorError) - .attach_printable("failed to build Kubernetes client")?; - let configuration = args - .into_configuration() - .attach_printable("invalid cli arguments")?; - let ct = CancellationToken::new(); - - ctrlc::set_handler({ - let ct = ct.clone(); - move || { - ct.cancel(); - } - }) - .change_context(OperatorError) - .attach_printable("failed to setup ctrl-c handler")?; - - controller::start(client, configuration, ct) - .await - .change_context(OperatorError) - .attach_printable("error while running operator")?; - Ok(()) -} - -#[tokio::main] -async fn main() -> Result<(), OperatorError> { - init_opentelemetry() - .change_context(OperatorError) - .attach_printable("failed to initialize opentelemetry")?; - let args = Cli::parse(); - - match args.command { - Command::GenerateCrd(args) => generate_crds(args)?, - Command::Start(args) => start(args).await?, - } - - Ok(()) -} - -impl StartArgs { - pub fn into_configuration(self) -> Result { - let mut configuration = 
Configuration { - namespace: self.namespace, - ..Configuration::default() - }; - - if let Some(sink_images) = self.sink.sink_images { - let mut sinks = HashMap::new(); - for image_kv in &sink_images { - match image_kv.split_once('=') { - Some((name, image)) if !image.contains('=') => { - sinks.insert( - name.to_string(), - SinkConfiguration { - image: image.to_string(), - }, - ); - } - _ => { - return Err(OperatorError) - .attach_printable_lazy(|| { - format!("invalid sink image mapping: {}", image_kv) - }) - .attach_printable("hint: expected format is `type=image`") - } - } - } - - configuration.with_sinks(sinks); - } - - Ok(configuration) - } -} diff --git a/operator/src/configuration.rs b/operator/src/configuration.rs deleted file mode 100644 index 90ddab9a..00000000 --- a/operator/src/configuration.rs +++ /dev/null @@ -1,64 +0,0 @@ -use std::collections::HashMap; - -static CONSOLE_IMAGE: &str = "quay.io/apibara/sink-console:latest"; -static MONGO_IMAGE: &str = "quay.io/apibara/sink-mongo:latest"; -static PARQUET_IMAGE: &str = "quay.io/apibara/sink-parquet:latest"; -static POSTGRES_IMAGE: &str = "quay.io/apibara/sink-postgres:latest"; -static WEBHOOK_IMAGE: &str = "quay.io/apibara/sink-webhook:latest"; - -#[derive(Debug, Clone)] -pub struct Configuration { - /// Sink type to image. - pub sinks: HashMap, - /// Sink status port. - pub status_port: i32, - /// Limit the namespace the operator watches. - pub namespace: Option, -} - -#[derive(Debug, Clone)] -pub struct SinkConfiguration { - /// The container image to use for the sink container. 
- pub image: String, -} - -impl Configuration { - pub fn with_sinks(&mut self, sinks: HashMap) -> &mut Self { - self.sinks = sinks; - self - } -} - -impl Default for Configuration { - fn default() -> Self { - let console = SinkConfiguration { - image: CONSOLE_IMAGE.to_string(), - }; - let mongo = SinkConfiguration { - image: MONGO_IMAGE.to_string(), - }; - let parquet = SinkConfiguration { - image: PARQUET_IMAGE.to_string(), - }; - let postgres = SinkConfiguration { - image: POSTGRES_IMAGE.to_string(), - }; - let webhook = SinkConfiguration { - image: WEBHOOK_IMAGE.to_string(), - }; - - let sinks = HashMap::from([ - ("console".to_string(), console), - ("mongo".to_string(), mongo), - ("parquet".to_string(), parquet), - ("postgres".to_string(), postgres), - ("webhook".to_string(), webhook), - ]); - - Configuration { - sinks, - status_port: 8118, - namespace: None, - } - } -} diff --git a/operator/src/context.rs b/operator/src/context.rs deleted file mode 100644 index 48f73e65..00000000 --- a/operator/src/context.rs +++ /dev/null @@ -1,23 +0,0 @@ -use std::fmt; - -use kube::Client; - -use crate::configuration::Configuration; - -#[derive(Clone)] -pub struct Context { - /// Kube client. - pub client: Client, - /// Operator configuration. 
- pub configuration: Configuration, -} - -#[derive(Debug)] -pub struct OperatorError; -impl error_stack::Context for OperatorError {} - -impl fmt::Display for OperatorError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("operator operation failed") - } -} diff --git a/operator/src/controller.rs b/operator/src/controller.rs deleted file mode 100644 index 66261066..00000000 --- a/operator/src/controller.rs +++ /dev/null @@ -1,96 +0,0 @@ -use std::fmt::Debug; - -use error_stack::{Result, ResultExt}; -use futures::{Future, Stream, StreamExt}; -use k8s_openapi::api; -use kube::{ - api::ListParams, - core::Resource, - runtime::{ - controller::{self, Action}, - reflector::ObjectRef, - watcher, Controller, - }, - Api, Client, -}; -use tokio_util::sync::CancellationToken; -use tracing::{error, info, warn}; - -use crate::{ - configuration::Configuration, - context::{Context, OperatorError}, - crd::Indexer, - reconcile::{self, ReconcileError}, -}; - -pub type ReconcileItem = - std::result::Result<(ObjectRef, Action), controller::Error>; - -pub async fn create( - client: Client, - configuration: Configuration, - ct: CancellationToken, -) -> Result>, OperatorError> { - info!("Creating controller"); - - let namespace = configuration.namespace.clone(); - let ctx = Context { - client, - configuration, - }; - - let indexers = if let Some(namespace) = &namespace { - Api::::namespaced(ctx.client.clone(), namespace) - } else { - Api::::all(ctx.client.clone()) - }; - - if indexers.list(&ListParams::default()).await.is_err() { - error!("Indexer CRD not installed"); - return Err(OperatorError).attach_printable("indexer CRD not installed"); - } - - info!("CRD installed. 
Starting controllor loop"); - - let pods = Api::::all(ctx.client.clone()); - - let controller = Controller::new(indexers, watcher::Config::default()) - .owns(pods, watcher::Config::default()) - .graceful_shutdown_on(async move { - ct.cancelled().await; - }) - .run( - reconcile::reconcile_indexer, - reconcile::error_policy, - ctx.into(), - ); - - Ok(controller) -} - -pub async fn start( - client: Client, - configuration: Configuration, - ct: CancellationToken, -) -> Result<(), OperatorError> { - let controller = create(client, configuration, ct).await?; - - run_controller_to_end(controller).await; - - Ok(()) -} - -fn run_controller_to_end( - controller_stream: impl Stream>, -) -> impl Future -where - K: Resource + Debug, - ::DynamicType: Debug, -{ - controller_stream.for_each(|res| async move { - match res { - Ok((obj, action)) => info!(obj = ?obj, action = ?action, "reconcile success"), - Err(err) => warn!(err = ?err, "reconcile error"), - } - }) -} diff --git a/operator/src/crd.rs b/operator/src/crd.rs deleted file mode 100644 index df50c002..00000000 --- a/operator/src/crd.rs +++ /dev/null @@ -1,118 +0,0 @@ -use k8s_openapi::{ - api::core::v1::{EnvVar, EnvVarSource, Volume, VolumeMount}, - apimachinery::pkg::apis::meta::v1::{Condition, Time}, -}; -use kube::CustomResource; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -pub type HeaderValueSource = EnvVarSource; - -/// Run an indexer. 
-#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] -#[kube( - kind = "Indexer", - group = "apibara.com", - version = "v1alpha2", - namespaced, - printcolumn = r#"{"name": "Age", "type": "date", "jsonPath": ".metadata.creationTimestamp" }"#, - printcolumn = r#"{"name": "Status", "type": "string", "jsonPath": ".status.phase" }"#, - printcolumn = r#"{"name": "Instance", "type": "string", "jsonPath": ".status.instanceName" }"#, - printcolumn = r#"{"name": "Restarts", "type": "number", "jsonPath": ".status.restartCount" }"# -)] -#[kube(status = "IndexerStatus", shortname = "indexer")] -#[serde(rename_all = "camelCase")] -pub struct IndexerSpec { - /// Indexer source code. - pub source: IndexerSource, - /// Sink to run. - pub sink: Sink, - /// List of volumes that can be mounted by containers belonging to the indexer. - pub volumes: Option>, - /// List of environment variables to set in the indexer container. - pub env: Option>, -} - -#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema, PartialEq)] -#[serde(rename_all = "camelCase")] -pub enum IndexerSource { - /// Clone the indexer repository from GitHub. - GitHub(GitHubSource), - /// Use source code from a mounted volume. - Volume(VolumeSource), -} - -#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct GitHubSource { - /// GitHub repository owner, e.g. `my-org`. - pub owner: String, - /// GitHub repository name, e.g. `my-indexer`. - pub repo: String, - /// Git revision, e.g. `main` or `a746ab`. - pub revision: String, - /// Run the indexer from the specified subpath of the repository, e.g. `/packages/indexer`. - pub subpath: Option, - /// Environment variable containing the GitHub access token. - pub access_token_env_var: Option, - /// Additional flags to pass to `git clone`. - pub git_clone_flags: Option, - /// Additional flags to pass to `git clean`. 
- pub git_clean_flags: Option, -} - -#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct VolumeSource { - /// Path to the indexer source code, e.g. `/myvolume`. - /// - /// Use this option with the `volumes` field to mount a volume containing the indexer source - /// code. - pub path: String, -} - -#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct Sink { - /// Container image with the sink. - #[serde(flatten)] - pub sink: SinkType, - /// Path to the script to run. - pub script: String, - /// Arguments passed to the sink. - pub args: Option>, -} - -#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema, PartialEq)] -#[serde(rename_all = "camelCase", untagged)] -pub enum SinkType { - Type { r#type: String }, - Image { image: String }, -} - -#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct IndexerVolume { - /// Volume to mount. - pub volume: Volume, - /// Volume mount specification. - pub volume_mount: VolumeMount, -} - -/// Most recent status of the indexer. -#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct IndexerStatus { - /// Conditions of the indexer. - pub conditions: Option>, - /// The name of the container running the indexer. - pub instance_name: Option, - /// Service name exposing the indexer's status. - pub status_service_name: Option, - /// Current phase of the indexer. - pub phase: Option, - /// Number of times the indexer container has restarted. - pub restart_count: Option, - /// Creation timestamp of the indexer's pod. - pub pod_created: Option