-
Notifications
You must be signed in to change notification settings - Fork 60
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Release/v1.7.x #1733
Release/v1.7.x #1733
Conversation
WalkthroughThe changes in this pull request primarily involve updates to the Changes
Possibly related PRs
Suggested reviewers
Poem
Warning Rate limit exceeded@joe-bowman has exceeded the limit for the number of commits or files that can be reviewed per hour. Please wait 19 minutes and 19 seconds before requesting another review. ⌛ How to resolve this issue?After the wait time has elapsed, a review can be triggered using the We recommend that you space out your commits to avoid hitting the rate limit. 🚦 How do rate limits work?CodeRabbit enforces hourly rate limits for each developer per organization. Our paid plans have higher rate limits than the trial, open-source and free plans. In all cases, we re-allow further reviews after a brief timeout. Please see our FAQ for further information. Warning There were issues while running some tools. Please review the errors and either fix the tool’s configuration or disable the tool if it’s a critical failure. 🔧 golangci-lintlevel=error msg="Running error: context loading failed: failed to load packages: failed to load packages: failed to load with go/packages: context deadline exceeded" Thank you for using CodeRabbit. We offer it for free to the OSS community and would appreciate your support in helping us grow. If you find it useful, would you consider giving us a shout-out on your favorite social media? 🪧 TipsChatThere are 3 ways to chat with CodeRabbit:
Note: Be mindful of the bot's finite context window. It's strongly recommended to break down tasks such as reading entire modules into smaller chunks. For a focused discussion, use review comments to chat about specific files and their changes, instead of using the PR comments. CodeRabbit Commands (Invoked using PR comments)
Other keywords and placeholders
CodeRabbit Configuration File (
|
c := make(chan os.Signal, 1) | ||
signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGABRT) | ||
|
||
go runner.Run(ctx, &config, CreateErrHandler(c)) |
Check notice
Code scanning / CodeQL
Spawning a Go routine Note
@@ -165,59 +140,57 @@ | |||
v.Events["source"] = []string{chainId} | |||
// why does this always trigger twice? messages are deduped later, but this causes 2x queries to trigger. | |||
time.Sleep(75 * time.Millisecond) // try to avoid thundering herd. | |||
go handleEvent(v, log.With(logger, "worker", "chainClient", "chain", defaultClient.Config.ChainID), metrics) | |||
go handleEvent(cfg, v, log.With(logger, "worker", "chainClient", "chain", cfg.DefaultChain.ChainID), metrics) |
Check notice
Code scanning / CodeQL
Spawning a Go routine Note
go func(defaultClient *types.ChainConfig, srcClient *types.ReadOnlyChainConfig, logger log.Logger) { | ||
defer wg.Done() | ||
CNT: | ||
for { | ||
time.Sleep(HistoricQueryInterval) | ||
req := &qstypes.QueryRequestsRequest{ | ||
Pagination: &querytypes.PageRequest{Limit: 500}, | ||
ChainId: srcClient.ChainID, | ||
} | ||
|
||
bz := defaultClient.Codec.Marshaler.MustMarshal(req) | ||
metrics.HistoricQueryRequests.WithLabelValues("historic_requests").Inc() | ||
var res *coretypes.ResultABCIQuery | ||
if err = retry.Do(func() error { | ||
res, err = defaultClient.RPCClient.ABCIQuery(ctx, "/quicksilver.interchainquery.v1.QuerySrvr/Queries", bz) | ||
return err | ||
}, RtyAtt, RtyDel, RtyErr); err != nil { | ||
continue CNT | ||
} | ||
out := &qstypes.QueryRequestsResponse{} | ||
err = defaultClient.Codec.Marshaler.Unmarshal(res.Response.Value, out) | ||
bz := cfg.ProtoCodec.MustMarshal(req) | ||
metrics.HistoricQueryRequests.WithLabelValues("historic_requests").Inc() | ||
var res *coretypes.ResultABCIQuery | ||
if err = retry.Do(func() error { | ||
res, err = defaultClient.Client.ABCIQuery(ctx, "/quicksilver.interchainquery.v1.QuerySrvr/Queries", bz) | ||
return err | ||
}, RtyAtt, RtyDel, RtyErr); err != nil { | ||
continue CNT | ||
} | ||
out := qstypes.QueryRequestsResponse{} | ||
err = cfg.ProtoCodec.Unmarshal(res.Response.Value, &out) | ||
if err != nil { | ||
err := logger.Log("msg", "Error: Unable to unmarshal: ", "error", err) | ||
if err != nil { | ||
err := logger.Log("msg", "Error: Unable to unmarshal: ", "error", err) | ||
if err != nil { | ||
return | ||
} | ||
continue CNT | ||
return | ||
} | ||
_ = logger.Log("worker", "chainClient", "msg", "fetched historic queries for chain", "count", len(out.Queries)) | ||
continue CNT | ||
} | ||
_ = logger.Log("worker", "chainClient", "msg", "fetched historic queries for chain", "count", len(out.Queries)) | ||
|
||
if len(out.Queries) > 0 { | ||
go handleHistoricRequests(out.Queries, defaultClient.Config.ChainID, log.With(logger, "worker", "historic"), metrics) | ||
} | ||
if len(out.Queries) > 0 { | ||
go handleHistoricRequests(cfg, srcClient, out.Queries, cfg.DefaultChain.ChainID, log.With(logger, "worker", "historic"), metrics) | ||
} | ||
}(defaultClient, chainClient, log.With(logger, "chain", defaultClient.Config.ChainID, "src_chain", chainClient.Config.ChainID)) | ||
} | ||
} | ||
}(cfg.DefaultChain, chainCfg, log.With(logger, "chain", cfg.DefaultChain.ChainID, "src_chain", chainCfg.ChainID)) |
Check notice
Code scanning / CodeQL
Spawning a Go routine Note
go handleHistoricRequests(out.Queries, defaultClient.Config.ChainID, log.With(logger, "worker", "historic"), metrics) | ||
} | ||
if len(out.Queries) > 0 { | ||
go handleHistoricRequests(cfg, srcClient, out.Queries, cfg.DefaultChain.ChainID, log.With(logger, "worker", "historic"), metrics) |
Check notice
Code scanning / CodeQL
Spawning a Go routine Note
metrics.HistoricQueries.WithLabelValues("historic-queries").Set(float64(len(queries))) | ||
|
||
if len(queries) == 0 { | ||
return | ||
} | ||
|
||
rand.Seed(time.Now().UnixNano()) | ||
rand.Shuffle(len(queries), func(i, j int) { queries[i], queries[j] = queries[j], queries[i] }) | ||
rand.New(rand.NewSource(time.Now().UnixNano())).Shuffle(len(queries), func(i, j int) { queries[i], queries[j] = queries[j], queries[i] }) |
Check warning
Code scanning / CodeQL
Calling the system time Warning
go func() { | ||
err := asyncCacheClientUpdate(ctx, cfg, cfg.Chains[msg.ClientUpdate.ChainId], Query{ConnectionId: msg.ClientUpdate.ConnectionId, Height: msg.ClientUpdate.Height}, msg.ClientUpdate.Height, logger, metrics) | ||
if err != nil { | ||
_ = logger.Log("msg", fmt.Sprintf("Error: Could not submit client update %s", err)) | ||
} | ||
}() |
Check notice
Code scanning / CodeQL
Spawning a Go routine Note
_ = logger.Log("msg", "Tx already in mempool") | ||
case strings.Contains(err.Error(), "request body too large"): | ||
TxMsgs = TxMsgs / 4 * 3 | ||
LastReduced = time.Now() |
Check warning
Code scanning / CodeQL
Calling the system time Warning
//fmt.Println("prepareMessages", idx) | ||
if len(list) > TxMsgs { | ||
//fmt.Println("transaction full; requeueing") | ||
go func(entry Message) { time.Sleep(time.Second * 2); sendQueue <- entry }(entry) // client update not ready; requeue. |
Check notice
Code scanning / CodeQL
Spawning a Go routine Note
cu, err := getCachedClientUpdate(entry.ClientUpdate.ConnectionId, entry.ClientUpdate.Height) | ||
if err != nil { | ||
//fmt.Println("client update not ready; requeueing") | ||
go func(entry Message) { time.Sleep(time.Second * 2); sendQueue <- entry }(entry) // client update not ready; requeue. |
Check notice
Code scanning / CodeQL
Spawning a Go routine Note
return "", 65536, err | ||
} | ||
|
||
gas := uint64(float64(simRes.GasInfo.GasUsed) * c.GasMultiplier) |
Check notice
Code scanning / CodeQL
Floating point arithmetic Note
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 36
🧹 Outside diff range and nitpick comments (17)
icq-relayer/Dockerfile (1)
18-18
: Consider pinning package versions for reproducible builds.While the current package installation is functional, consider pinning specific versions to ensure reproducible builds and prevent potential issues from package updates.
-RUN apk add --no-cache ca-certificates jq curl +RUN apk add --no-cache \ + ca-certificates=20230506-r0 \ + jq=1.7.1-r0 \ + curl=8.5.0-r0icq-relayer/Makefile (1)
15-16
: LGTM! Consider adding version validation.The new
dockerx
target correctly implements cross-platform building for linux/amd64. However, given the strict version requirements mentioned in the PR (icq-relayer v1.1.0+), consider adding version validation.Add version validation to prevent building incompatible versions:
version ?= latest +# Add this at the beginning of the dockerx target +dockerx: + @if [ "${version}" = "latest" ]; then \ + echo "Error: Specific version required. Use version=X.X.X"; \ + exit 1; \ + fi docker buildx build --platform linux/amd64 -f Dockerfile . -t quicksilverzone/interchain-queries:${version}proto/quicksilver/proofs/proofs.proto (2)
10-13
: LGTM! Consider adding field documentation.The CelestiaProof message structure is well-defined and aligns with the PR objectives.
Consider adding field comments to document:
- The purpose and format of the share_proof field
- The meaning and valid ranges of the index field
message CelestiaProof { + // share_proof contains the Celestia-specific proof data for transaction verification celestia.core.v1.proof.ShareProof share_proof = 1; + // index represents the position of the transaction in the block uint32 index = 2; }
15-17
: LGTM! Consider adding message documentation.The TendermintProof message structure is appropriately defined for handling Tendermint transaction proofs.
Consider adding documentation to clarify the message's purpose:
+// TendermintProof encapsulates the proof data required to verify transaction inclusion +// in a Tendermint/CometBFT-based blockchain message TendermintProof { + // tx_proof contains the Tendermint-specific proof data for transaction verification tendermint.types.TxProof tx_proof = 1; }proto/celestia/proof.proto (1)
51-57
: Enhance documentation for reference Proof messageConsider improving the documentation of the commented-out Proof message to clearly indicate:
- Why it's included as a reference
- Which merkle package it's from
- How it relates to the other proof types
-// Proof is taken from the merkle package +// Reference: Original Proof message from the Tendermint merkle package. +// This is included to show the base structure that influenced our proof designs. // message Proof {icq-relayer/cmd/root.go (1)
Line range hint
1-72
: Breaking Changes: Update DocumentationGiven the significant changes in configuration and key management, along with the breaking changes in proof handling mentioned in the PR objectives, the command's help text should be updated to reflect:
- New configuration requirements
- Minimum version requirements (icq-relayer v1.1.0+)
- Changes in proof handling
Consider updating the
Long
description inrootCmd
:var rootCmd = &cobra.Command{ Use: "icq-relayer", Short: "A relayer for the Quicksilver interchain queries module", - Long: `A relayer for Quicksilver interchain-queries, allowing cryptographically verifiable cross-chain KV lookups.`, + Long: `A relayer for Quicksilver interchain-queries, allowing cryptographically verifiable cross-chain KV lookups. + +Requirements: +- Compatible with Quicksilver v1.7.0+ +- Supports Celestia Inclusion proofs +- Minimum version: v1.1.0`, }x/interchainstaking/keeper/callbacks_test.go (1)
Line range hint
2230-2234
: Consider adding Celestia proof test cases.Since this PR introduces Celestia Inclusion proof handling, consider adding specific test cases to validate the
CelestiaProof
scenarios.Example test case structure:
func (suite *KeeperTestSuite) TestCheckTMHeaderForZoneWithCelestiaProof() { // Test cases for Celestia proof validation testCases := []struct { name string setupProof func() *CelestiaProof expectError bool }{ // Add various Celestia proof scenarios } // ... test implementation }utils/proofs/proofs.go (1)
60-63
: Clarify error messages for proof validation failuresConsider providing more context in the error messages when proof validation fails to aid in debugging.
Apply this diff to enhance error messages:
err = tmproof.Validate(dataHash) if err != nil { - return nil, fmt.Errorf("unable to validate proof: %w", err) + return nil, fmt.Errorf("unable to validate Tendermint proof against provided data hash: %w", err) }third-party-chains/celestia-types/types/row_proof.go (1)
14-86
: Add unit tests forRowProof
and its methodsTo ensure the correctness and reliability of the
RowProof
struct and its methodsValidate
andVerifyProof
, it's recommended to add unit tests that cover various scenarios, including edge cases and potential error conditions.Would you like assistance in generating unit tests for these methods?
icq-relayer/cmd/run.go (1)
132-132
: Make thehome
flag persistent across commandsThe
home
flag is added tostartCommand
at line 132 but may be needed by other commands as well. Consider making thehome
flag persistent by adding it to the root command so that it is available across all subcommands.icq-relayer/pkg/types/rpc.go (3)
15-16
: Review the practice of copying code from external packagesThe comment indicates that the
RPCClient
is "copy and pasted" fromgithub.com/tendermint/tendermint/rpc/client/http
. Copying code from external packages may lead to licensing issues and increased maintenance overhead if the original code changes. Consider the following alternatives:
- Contribute to the original package: Submit a pull request to Tendermint to expose the
baseRPCClient
or the necessary functionalities, allowing you to use the official client without modification.- Use composition or embedding: Instead of copying, embed the existing
rpchttp.HTTP
client and add any additional methods or override functionalities as needed.- Wrap the existing client: Create a wrapper around
rpchttp.HTTP
that adds your required functionality without duplicating code.
18-19
: Encapsulate the internal caller by making the field unexportedThe
caller
field in theRPCClient
struct is exported. To promote encapsulation and prevent external modification, consider making it unexported by renaming it tocaller
(with a lowercase 'c'). This hides the internal implementation details from consumers of the package.
68-90
: Simplify duplicate code inABCIQuery
andABCIQueryWithOptions
methodsThe
ABCIQuery
method callsABCIQueryWithOptions
with default options. To reduce code duplication and improve readability, consider documenting this relationship clearly or refactoring if additional logic is required.icq-relayer/pkg/types/client.go (1)
381-381
: Consider making the HD path configurableThe HD path is currently hard-coded to
m/44'/118'/0'/0/0
. If different accounts or derivation paths are needed, consider making the HD path configurable or documenting why this path is appropriate.icq-relayer/pkg/runner/run.go (3)
43-47
: Add JSON struct tags to exported struct fieldsThe fields in the
ClientUpdateRequirement
struct are exported but lack JSON struct tags. Adding JSON tags ensures proper serialization and consistent field names when encoding to JSON.Apply this change:
type ClientUpdateRequirement struct { - ConnectionId string - ChainId string - Height int64 + ConnectionId string `json:"connection_id"` + ChainId string `json:"chain_id"` + Height int64 `json:"height"` }
49-52
: Add JSON struct tags to the 'Message' struct fieldsSimilarly, the
Message
struct's exported fields lack JSON tags. Including JSON tags ensures consistent serialization, especially if these structs are marshaled to JSON in the future.Apply this change:
type Message struct { - Msg sdk.Msg - ClientUpdate *ClientUpdateRequirement + Msg sdk.Msg `json:"msg"` + ClientUpdate *ClientUpdateRequirement `json:"client_update,omitempty"` }
593-600
: Review time-based logic for potential non-determinismUsing
LastReduced.Add(time.Second * 30).Before(time.Now())
depends on system time, which may introduce non-determinism. Ensure that this time-based logic is acceptable and consider mocking time in tests to improve testability.🧰 Tools
🪛 GitHub Check: CodeQL
[warning] 593-593: Calling the system time
Calling the system time may be a possible source of non-determinism
[warning] 600-600: Calling the system time
Calling the system time may be a possible source of non-determinism
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
⛔ Files ignored due to path filters (7)
go.sum
is excluded by!**/*.sum
go.work
is excluded by!**/*.work
go.work.sum
is excluded by!**/*.sum
icq-relayer/go.sum
is excluded by!**/*.sum
third-party-chains/celestia-types/proto-types/proof.pb.go
is excluded by!**/*.pb.go
utils/proofs/proofs.pb.go
is excluded by!**/*.pb.go
x/interchainquery/types/query.pb.go
is excluded by!**/*.pb.go
📒 Files selected for processing (23)
- Makefile (2 hunks)
- go.mod (4 hunks)
- icq-relayer/.dockerignore (1 hunks)
- icq-relayer/Dockerfile (1 hunks)
- icq-relayer/Makefile (1 hunks)
- icq-relayer/cmd/config.go (1 hunks)
- icq-relayer/cmd/keys.go (0 hunks)
- icq-relayer/cmd/root.go (2 hunks)
- icq-relayer/cmd/run.go (1 hunks)
- icq-relayer/go.mod (7 hunks)
- icq-relayer/pkg/config/config.go (0 hunks)
- icq-relayer/pkg/runner/run.go (12 hunks)
- icq-relayer/pkg/types/client.go (1 hunks)
- icq-relayer/pkg/types/config.go (1 hunks)
- icq-relayer/pkg/types/rpc.go (1 hunks)
- proto/celestia/proof.proto (1 hunks)
- proto/quicksilver/interchainquery/v1/query.proto (2 hunks)
- proto/quicksilver/proofs/proofs.proto (1 hunks)
- third-party-chains/celestia-types/types/row_proof.go (1 hunks)
- third-party-chains/celestia-types/types/share_proof.go (1 hunks)
- utils/proofs/proofs.go (1 hunks)
- x/interchainstaking/keeper/callbacks.go (6 hunks)
- x/interchainstaking/keeper/callbacks_test.go (1 hunks)
💤 Files with no reviewable changes (2)
- icq-relayer/cmd/keys.go
- icq-relayer/pkg/config/config.go
✅ Files skipped from review due to trivial changes (2)
- icq-relayer/.dockerignore
- icq-relayer/cmd/config.go
🧰 Additional context used
🪛 GitHub Check: CodeQL
icq-relayer/cmd/run.go
[notice] 122-122: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinismicq-relayer/pkg/runner/run.go
[notice] 143-143: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[notice] 159-193: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[notice] 190-190: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[warning] 216-216: Calling the system time
Calling the system time may be a possible source of non-determinism
[notice] 270-270: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[notice] 341-341: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[warning] 593-593: Calling the system time
Calling the system time may be a possible source of non-determinism
[warning] 600-600: Calling the system time
Calling the system time may be a possible source of non-determinism
[notice] 612-617: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[warning] 648-648: Calling the system time
Calling the system time may be a possible source of non-determinism
[notice] 687-687: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[notice] 712-712: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinismicq-relayer/pkg/types/client.go
[notice] 532-532: Floating point arithmetic
Floating point arithmetic operations are not associative and a possible source of non-determinism
🪛 buf
proto/celestia/proof.proto
4-4: import "tendermint/crypto/proof.proto": file does not exist
(COMPILE)
proto/quicksilver/proofs/proofs.proto
4-4: import "celestia/proof.proto": file does not exist
(COMPILE)
🪛 GitHub Check: devskim
third-party-chains/celestia-types/types/share_proof.go
[notice] 31-31: A "TODO" or similar was left in source code, possibly indicating incomplete functionality
Suspicious comment
🔇 Additional comments (35)
icq-relayer/Dockerfile (1)
18-18
: LGTM! Package additions align with PR objectives.The addition of
jq
andcurl
is appropriate for handling JSON-formatted proof data and making HTTP requests, which may be necessary for processing Celestia Inclusion proofs and interacting with blockchain nodes.proto/quicksilver/proofs/proofs.proto (2)
4-4
:⚠️ Potential issueVerify the Celestia proto import path and dependency.
The static analysis indicates that "celestia/proof.proto" cannot be found. This import is critical for the CelestiaProof message definition.
Let's verify the proto file locations and dependencies:
Please ensure that:
- The Celestia proto files are properly vendored or included in your project
- The import path is correctly configured in your buf.yaml
- All necessary proto dependencies are declared in your buf.work.yaml
✅ Verification successful
Let me check the content of these buf files to understand the dependency configuration.
Let me try one more search to find Celestia-related files, this time without type filtering:
Import path "celestia/proof.proto" is correctly configured
The import path is valid as the proto file exists at
proto/celestia/proof.proto
relative to the repository root, which matches the import statementimport "celestia/proof.proto"
. The dependency is properly vendored in the project.🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Description: Check for Celestia proto files and their locations # Test: Look for Celestia-related proto files fd -e proto -d 3 celestia # Test: Check if there are any buf.yaml or buf.work.yaml files that might define dependencies fd "buf\.(yaml|work\.yaml)$"Length of output: 87
Script:
#!/bin/bash # Check buf configuration files echo "=== buf.work.yaml content ===" cat buf.work.yaml echo -e "\n=== proto/buf.yaml content ===" cat proto/buf.yaml # Also check if there are any other proto files that might reference Celestia echo -e "\n=== Files containing 'celestia' ===" rg -l "celestia" --type protoLength of output: 743
Script:
#!/bin/bash # Search for files containing 'celestia' in their content or name echo "=== Files containing 'celestia' (case-insensitive) ===" rg -il celestia echo -e "\n=== Files with 'celestia' in name ===" fd -I celestiaLength of output: 727
🧰 Tools
🪛 buf
4-4: import "celestia/proof.proto": file does not exist
(COMPILE)
1-17
: Verify related proto file updates for breaking change.Since this introduces new proof types as part of a breaking change (replacing
proof
withproof_any
inGetTxWithProofResponse
), we should verify that all related proto files are updated consistently.Let's check for related proto definitions:
🧰 Tools
🪛 buf
4-4: import "celestia/proof.proto": file does not exist
(COMPILE)
proto/quicksilver/interchainquery/v1/query.proto (2)
9-9
: LGTM: Required import for new proof typeThe addition of
google/protobuf/any.proto
import is necessary to support the newproof_any
field.
44-44
: Breaking Change: Verify compatibility with existing clientsThis represents a significant breaking change in the API:
- The
proof
field (line 44) is now deprecated as of v1.7.0- The new
proof_any
field (line 48) replaces it, using a more flexiblegoogle.protobuf.Any
type that can accommodate different proof implementations (Tendermint, Celestia)Please ensure:
- All clients are notified about this breaking change
- Documentation is updated to reflect:
- The new proof format requirements
- The minimum version requirements (icq-relayer v1.1.0+ with quicksilver v1.7.0+)
- Migration guidelines for existing implementations
Let's verify the impact:
Also applies to: 47-48
✅ Verification successful
Let me gather more information to verify the impact of this change.
Let me gather one final piece of information to check the actual implementation and usage of the new proof_any field.
Based on the gathered information, I can now provide the final response:
Breaking Change Verified: Minimal Impact, Migration Path Available
The deprecation of
proof
field in favor ofproof_any
represents a forward-compatible change:
- The old
proof
field is marked as deprecated but still present, allowing existing clients to function- The new
proof_any
field usinggoogle.protobuf.Any
provides extensibility for different proof types- Implementation shows proper protobuf field numbering (proof=3, proof_any=5) maintaining backward compatibility
The impact is limited to:
icq-relayer/pkg/runner/run.go
x/interchainstaking/keeper/callbacks.go
x/interchainstaking/keeper/callbacks_test.go
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Search for direct usage of the deprecated proof field rg -l "\.proof\s*=[^=]" --type proto # Search for potential client implementations that might be affected rg -l "GetTxWithProofResponse" --type goLength of output: 260
Script:
#!/bin/bash # Check the actual usage of the proof field in Go code rg -A 5 "\.proof\s*[^=]*=" # Check for any migration guides or documentation about this change fd README.md | xargs rg -i "proof.*deprecat|v1.7.0" # Check the commit history for related changes git log -p --grep="proof.*deprecat|v1.7.0" # Look for any version constraints or compatibility notes fd "go.mod|*.toml" | xargs rg "v1.7.0"Length of output: 11294
Script:
#!/bin/bash # Check for the implementation and usage of proof_any rg -A 5 "proof_any" # Check for any recent changes in the interchainquery module rg -l "interchainquery.*proof" --type goLength of output: 909
proto/celestia/proof.proto (4)
9-17
: Well-structured ShareProof message definitionThe ShareProof message is well-documented and properly structured with all necessary fields for Celestia share proofs. The field numbering and types are appropriate for the use case.
19-27
: Well-defined RowProof message with external dependencyThe RowProof message is well-structured and properly documented. The use of
tendermint.crypto.Proof
is appropriate for the Merkle proof implementation.#!/bin/bash # Verify the version compatibility of Tendermint types being used rg -l "tendermint/crypto/proof" | xargs rg "go.mod" -A 5 -B 5
29-49
: Verify byte size constraints in implementationThe documentation specifies exact byte sizes:
- Nodes should be 48 bytes each (including max/min namespaces and hash)
- Leaf hashes should be 40 bytes (including namespace and hash)
Please ensure these constraints are validated in the implementation code.
#!/bin/bash # Search for validation of byte sizes in implementation rg -A 5 "NMTProof" | grep -i "byte"
4-5
: Verify Tendermint proto imports availabilityThe import
tendermint/crypto/proof.proto
is flagged as missing. Please ensure these proto files are available in your proto path during compilation.✅ Verification successful
Based on the results showing that Tendermint/CometBFT dependencies are properly declared in the go.mod file (including
github.com/tendermint/tendermint v0.34.33
which is replaced with CometBFT), I can now generate the final response:Tendermint proto imports are valid
The imports from Tendermint are valid as the project correctly depends on Tendermint/CometBFT v0.34.33, which includes these proto definitions. The proto files will be available during compilation through the vendored dependencies.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Check if Tendermint proto files are available in common proto paths fd -t f "proof.proto" | grep -i "tendermint/crypto"Length of output: 2033
Script:
#!/bin/bash # Let's check for Tendermint dependencies and proto files in the repository fd -t f "go.mod" | xargs cat | grep -i tendermint # Also check for any other proto files that might be importing Tendermint protos rg -l "import.*tendermint" --type protoLength of output: 676
🧰 Tools
🪛 buf
4-4: import "tendermint/crypto/proof.proto": file does not exist
(COMPILE)
icq-relayer/cmd/root.go (2)
72-72
: Verify the removal of key management functionality.The key management command has been commented out, but there's no clear indication of what replaces this functionality. This could impact how the relayer manages keys for different chains.
Let's check for alternative key management implementations:
#!/bin/bash # Search for new key management patterns echo "Searching for new key management code..." rg -g '!vendor/*' -g '*.go' 'func.*[Kk]ey' -A 5 echo "Checking for potential replacement files..." fd -g '*key*.go'
48-54
: Verify the impact of removing configuration initialization.The removal of configuration initialization could have significant implications:
- How is the relayer now configured for different chains?
- How are the inclusion proof settings managed without configuration?
This change seems particularly important given that this release requires icq-relayer v1.1.0+ and has breaking changes in proof handling.
Let's verify how configuration is now managed:
icq-relayer/pkg/types/config.go (1)
16-26
: Consider adding proof validation configuration.Given that this release introduces Celestia Inclusion proofs and requires icq-relayer v1.1.0+, consider adding configuration fields for proof validation settings (e.g., proof types allowed, validation timeouts, etc.).
icq-relayer/go.mod (3)
3-3
: Verify Go version requirementThe upgrade to Go 1.23.2 is a significant change that might affect build environments and CI systems. Ensure that:
- All development and production environments can support this version
- CI/CD pipelines are updated accordingly
- Team members are notified about this requirement
42-43
: LGTM: Celestia dependencies addedThe addition of Celestia-related dependencies aligns with the PR objectives for handling Celestia Inclusion proofs.
197-197
: Verify the local replace directiveThe local replace directive
github.com/quicksilver-zone/quicksilver => ..
might cause issues in production. Ensure this is intended only for development and is properly managed in the release process.✅ Verification successful
Local replace directive is intended for development
The local replace directive
github.com/quicksilver-zone/quicksilver => ..
is correctly used in this context. This is a monorepo setup where the ICQ relayer is a submodule of the main Quicksilver project, and the replace directive is necessary for local development. The Makefile confirms this is a development configuration with proper build processes in place.🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Check if there are any other go.mod files with similar replace directives fd -e mod -x grep -l "quicksilver => \.\."Length of output: 65
Script:
#!/bin/bash # Check the content of go.mod to understand the context better cat icq-relayer/go.mod # Also check if there are any build or release scripts that might handle these replace directives fd -t f "build|release|deploy" -x grep -l "go.mod"Length of output: 9917
Script:
#!/bin/bash # Check if this is a monorepo structure by looking at the parent directory ls -la ../ # Check for Makefile or any build configurations that might handle these replace directives fd Makefile -x cat {} \;Length of output: 17380
Makefile (1)
41-41
: LGTM: Addition of osusergo build tagThe
osusergo
build tag is a good addition as it ensures user/group ID lookups use pure Go implementation instead of cgo, making the binary more portable across different environments.go.mod (4)
9-10
: LGTM: New Celestia dependencies align with PR objectives.The addition of
github.com/celestiaorg/go-square/v2
andgithub.com/celestiaorg/nmt
aligns with the PR's objective of handling Celestia Inclusion proofs.
336-336
: LGTM: Consistent version update.The update of
google.golang.org/genproto/googleapis/rpc
to versionv0.0.0-20240701130421-f6361c86f094
maintains consistency with other Google API dependencies.
Line range hint
344-362
: LGTM: Well-documented replace directives.The replace directives are well-documented with clear explanations, particularly for security fixes like the GHSA-h395-qcrw-5vmq vulnerability in gin-gonic/gin.
316-319
: Verify OpenTelemetry version compatibility.All OpenTelemetry packages have been consistently updated to v1.28.0. However, ensure that this version is compatible with your observability infrastructure.
#!/bin/bash # Check for any OpenTelemetry configuration files rg -l "opentelemetry" --type yaml --type jsonx/interchainstaking/keeper/callbacks_test.go (1)
2230-2230
: LGTM! Verify comprehensive test coverage.The updated method call correctly reflects the breaking change to use
tmclienttypes.Header
. However, let's verify that all test scenarios are covered.✅ Verification successful
Test coverage is comprehensive, no additional test cases needed
The test suite already includes a comprehensive set of test cases for
CheckTMHeaderForZone
, covering:
- Happy path (successful validation)
- Client state fetch failure
- Consensus state validation
- Header verification with time-based checks
- Proof marshaling errors
- Data hash verification
The changes to use
tmclienttypes.Header
are properly tested, and the test cases cover the core header validation scenarios. Since the proof validation is now handled at a lower level by the IBC keeper, specific Celestia proof test cases are not needed in this context.🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Description: Check for comprehensive test coverage of header validation scenarios # Look for other test files that might need similar updates rg -l "GetTxWithProofResponse" --type go # Check for existing Celestia-related test cases rg -i "celestia.*test" --type goLength of output: 226
Script:
#!/bin/bash # Let's analyze the changes in the test file more thoroughly # Check the context around CheckTMHeaderForZone tests rg -B 3 -A 3 "CheckTMHeaderForZone" x/interchainstaking/keeper/callbacks_test.go # Look for test cases related to proof validation rg -B 3 -A 3 "TestCallback.*Proof" x/interchainstaking/keeper/callbacks_test.go # Check the implementation to understand what we're testing ast-grep --pattern 'func (k Keeper) CheckTMHeaderForZone($$$) { $$$ }'Length of output: 571
Script:
#!/bin/bash # Let's examine the test cases in more detail # Get the full test function and its test cases rg -A 50 "func.*TestCheckTMHeaderForZone" x/interchainstaking/keeper/callbacks_test.go # Check the implementation file for the actual method rg -A 20 "func.*CheckTMHeaderForZone" x/interchainstaking/keeper/callbacks.goLength of output: 3228
utils/proofs/proofs.go (2)
14-18
: Interface `InclusionProof`
is well-defined and appropriate. The
`InclusionProof`
interface correctly extends `proto.Message`
and defines the `Validate`
method, ensuring consistency across different proof types.
65-65
: Confirm `tmproof.Data`
is not nil before returning. While it's unlikely, ensure that
`tmproof.Data`
is not `nil`
before returning it to prevent unexpected `nil`
values. Run the following script to check for potential nil
`Data`
fields:
icq-relayer/cmd/run.go (1)
122-122
: Potential non-determinism due to goroutine spawningSpawning a goroutine at line 122 may introduce non-determinism if shared resources are not properly synchronized. Ensure that any shared variables accessed by
runner.Run
are protected with appropriate synchronization mechanisms to prevent data races and inconsistent states.🧰 Tools
🪛 GitHub Check: CodeQL
[notice] 122-122: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
icq-relayer/pkg/types/rpc.go (3)
21-27
: Ensure all interface methods are implemented. The
`RPCClientI`
interface includes several embedded interfaces from the Tendermint RPC client package. Verify that `RPCClient`
implements all the methods required by these interfaces, especially if there are updates to the Tendermint RPC client interfaces in future versions.
48-56
: Confirm proper error handling in the `Status`
method. The
`Status`
method correctly initializes the result and handles errors from the RPC call. Ensure that the `result`
returned is valid and that any edge cases are appropriately managed.
317-346
: Handle deprecated parameters in the `TxSearch`
method. The
`TxSearch`
method uses `page`
and `perPage`
parameters, which may be deprecated in future Tendermint versions in favor of pagination tokens or other mechanisms. Ensure compatibility with different Tendermint versions or handle deprecation appropriately.
icq-relayer/pkg/runner/run.go (1)
648-649
: ⚠️ Potential issue — Avoid potential underflow when adjusting `TxMsgs`
Reducing
`TxMsgs`
without checking for minimum limits could lead to it becoming zero or negative, causing runtime errors. Ensure that `TxMsgs`
stays within a reasonable range. Apply this check:
TxMsgs = TxMsgs / 4 * 3
+if TxMsgs < 1 {
+  TxMsgs = 1
+}
Likely invalid or redundant comment.
🧰 Tools
🪛 GitHub Check: CodeQL
[warning] 648-648: Calling the system time
Calling the system time may be a possible source of non-determinismx/interchainstaking/keeper/callbacks.go (7)
32-32
: Import statement added for proofs package. The addition of
github.com/quicksilver-zone/quicksilver/utils/proofs
is necessary for handling inclusion proofs in the updated functions.
450-450
: Update of `CheckTMHeaderForZone`
function signature. The function signature change in
`CheckTMHeaderForZone`
now accepts `*tmclienttypes.Header`
instead of `icqtypes.GetTxWithProofResponse`
. This change aligns the function with its purpose of validating Tendermint headers directly and improves abstraction.
505-508
: Enhanced error handling when unmarshalling arguments. The added error logging when unmarshalling
`args`
into `res`
provides clearer insight into failures during the unmarshalling process.
512-513
: Proper unpacking of inclusion proof. Introducing
`inclusionProof`
of type `proofs.InclusionProof`
and unpacking it using `k.cdc.UnpackAny`
enhances the flexibility to handle different proof types.
518-521
: Validation of transaction bytes using inclusion proof. Validating
`txBytes`
with `inclusionProof.Validate(res.Header.Header.DataHash)`
ensures the transaction is correctly included in the block, improving security and reliability.
543-543
: Ensuring header validation with updated `CheckTMHeaderForZone`
Updating the call to
`CheckTMHeaderForZone`
with `res.GetHeader()`
correctly uses the new function signature and ensures the header is validated before processing.
547-550
: Type assertion of the decoded transaction. The type assertion to
*tx.Tx
and corresponding error handling ensure that the transaction is in the expected format before proceeding.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 26
🧹 Outside diff range and nitpick comments (23)
icq-relayer/Dockerfile (1)
Line range hint
1-1
: Consider updating to golang:1.22.4-alpine3.19The current base image uses Go 1.22.2, while Go 1.22.4 is available with security updates. Consider updating to the latest patch version.
-FROM golang:1.22.2-alpine3.19 as build +FROM golang:1.22.4-alpine3.19 as buildicq-relayer/Makefile (1)
15-16
: Consider adding multi-arch support for broader compatibility.The new
dockerx
target is a good addition for explicit platform targeting. However, consider extending it to support multiple architectures for better deployment flexibility.dockerx: - docker buildx build --platform linux/amd64 -f Dockerfile . -t quicksilverzone/interchain-queries:${version} + docker buildx build --platform linux/amd64,linux/arm64 -f Dockerfile . -t quicksilverzone/interchain-queries:${version}proto/quicksilver/proofs/proofs.proto (2)
10-13
: Add field documentation for CelestiaProof messageConsider adding comments to document:
- Purpose of the
share_proof
field and its relationship to Celestia's proof system- Significance of the
index
field and its valid rangeExample documentation:
message CelestiaProof { + // share_proof contains the Celestia-specific proof data for transaction verification celestia.core.v1.proof.ShareProof share_proof = 1; + // index represents the position of the transaction in the block uint32 index = 2; }
1-17
: Document breaking changes in API documentationSince this introduces a breaking change replacing the
proof
field withproof_any
inGetTxWithProofResponse
, consider:
- Adding a comment block at the top of this file explaining the relationship with
GetTxWithProofResponse
- Documenting the version compatibility requirement (icq-relayer v1.1.0+)
- Creating migration guides for users updating from previous versions
Would you like me to help draft the documentation or create a migration guide?
🧰 Tools
🪛 buf
4-4: import "celestia/proof.proto": file does not exist
(COMPILE)
proto/quicksilver/interchainquery/v1/query.proto (1)
44-48
: Important: Breaking Changes - Version Compatibility RequirementsThis proto change introduces significant breaking changes that require careful coordination:
- Clients must migrate from using
proof
toproof_any
- The icq-relayer must be version 1.1.0 or higher when used with quicksilver v1.7.0+
- The response format changes affect all consumers of this proto message
Consider:
- Adding a migration guide in the documentation
- Including version compatibility matrix
- Adding runtime version checks where possible
proto/celestia/proof.proto (3)
11-17
: Consider enhancing field documentation.While the message-level documentation is good, individual fields would benefit from detailed documentation explaining their purpose and constraints.
Add field documentation like this:
message ShareProof { + // data contains the raw share data repeated bytes data = 1; + // share_proofs contains the NMT proofs for each share repeated NMTProof share_proofs = 2; + // namespace_id identifies the namespace these shares belong to bytes namespace_id = 3; + // row_proof contains the proof for the rows containing these shares RowProof row_proof = 4; + // namespace_version indicates the version of the namespace protocol being used uint32 namespace_version = 5; }
21-27
: Enhance RowProof field documentation.Consider adding detailed documentation for each field to improve maintainability.
Add field documentation like this:
message RowProof { + // row_roots contains the Merkle roots of individual rows repeated bytes row_roots = 1; + // proofs contains the Merkle proofs for each row repeated tendermint.crypto.Proof proofs = 2; + // root is the Merkle root that these rows are being proved against bytes root = 3; + // start_row indicates the beginning row number in this proof uint32 start_row = 4; + // end_row indicates the final row number in this proof uint32 end_row = 5; }
51-57
: Consider moving the reference Proof message to documentation.The commented-out Proof message appears to be for reference. Consider moving this to documentation comments or a separate documentation file to avoid confusion about whether it should be implemented.
-// Proof is taken from the merkle package -// message Proof { -// int64 total = 1; -// int64 index = 2; -// bytes leaf_hash = 3; -// repeated bytes aunts = 4; -// } +// Reference: The merkle package defines a Proof message with the following structure: +// - total (int64): field 1 +// - index (int64): field 2 +// - leaf_hash (bytes): field 3 +// - aunts (repeated bytes): field 4icq-relayer/pkg/types/config.go (1)
16-26
: Add field-level documentation for the Config struct.While the struct has a high-level comment, adding documentation for individual fields would improve maintainability and help users understand the purpose and constraints of each configuration option.
Example improvement:
// Config represents the config file for the relayer type Config struct { + // BindPort is the port number the relayer will listen on BindPort int + // MaxMsgsPerTx is the maximum number of messages to include in a single transaction MaxMsgsPerTx int + // AllowedQueries defines the list of permitted query types AllowedQueries []string // ... continue for other fieldsx/interchainstaking/keeper/callbacks_test.go (1)
Based on the search results, I can now generate a final response as we have sufficient context about the test coverage and implementation.
Test coverage for header validation is minimal and needs enhancement
The current test suite only covers two basic scenarios:
- Happy path - successful header validation
- Error case - when client state cannot be fetched
Given the implementation in
callbacks.go
shows complex validation logic including consensus state verification, the test coverage should be expanded to include:
- Consensus state validation failures
- Invalid trusted height scenarios
- Header verification against client state
- Edge cases around client and consensus state mismatches
🔗 Analysis chain
Line range hint
2230-2234
: Enhance test coverage for header validation and proof verification.While the test correctly uses the new
CheckTMHeaderForZone
signature, consider adding test cases that specifically validate:
- Different header types and their validation
- Edge cases in header validation
- Breaking changes related to proof validation
Let's check the test coverage:
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Description: Verify test coverage for header validation and proof verification # Check for existing test cases covering header validation rg -A 10 "CheckTMHeaderForZone.*test" # Look for proof validation test cases ast-grep --pattern 'func Test.*Proof.*\(.*\) { $$$ }'Length of output: 97
Script:
#!/bin/bash # Let's try a broader search to understand the test coverage # Search for CheckTMHeaderForZone function and its tests rg -B 5 -A 15 "CheckTMHeaderForZone" # Search for test cases related to header validation rg -B 5 -A 15 "TestCheckTM" # Look for proof-related test cases with a more flexible pattern rg -B 5 -A 15 "Test.*Proof" # Check the actual implementation to understand what we're testing ast-grep --pattern 'func (k $_) CheckTMHeaderForZone($$$) { $$$ }'Length of output: 8096
utils/proofs/proofs.go (2)
23-53
: Add unit tests forCelestiaProof.Validate
Including unit tests for the
Validate
method inCelestiaProof
will help ensure correctness and prevent future regressions.Would you like assistance in creating unit tests for this method?
55-66
: Add unit tests forTendermintProof.Validate
Including unit tests for the
Validate
method inTendermintProof
will help ensure correctness and prevent future regressions.Would you like assistance in creating unit tests for this method?
third-party-chains/celestia-types/types/row_proof.go (1)
21-28
: Update comments to reflect the validation of StartRow and EndRowThe comments state that
StartRow
andEndRow
are not validated, but theValidate
method does perform checks on these fields. Updating the comments will provide accurate documentation of the current behavior.Apply this diff to update the comments:
// StartRow is the index of the start row. -// Note: currently, StartRow is not validated as part of the proof verification. -// If this field is used downstream, Validate(root) should be called along with -// extra validation depending on how it's used. +// Note: StartRow is validated in the Validate method.// EndRow is the index of the end row. -// Note: currently, EndRow is not validated as part of the proof verification. -// If this field is used downstream, Validate(root) should be called along with -// extra validation depending on how it's used. +// Note: EndRow is validated in the Validate method.icq-relayer/cmd/run.go (3)
38-68
: Add documentation comments to exported functionsThe exported function
InitConfigCommand
lacks a documentation comment. According to Go conventions, all exported functions should have comments explaining their purpose and usage.Consider adding a comment like:
// InitConfigCommand initializes the configuration command. func InitConfigCommand() *cobra.Command { //... }Similarly, please add documentation comments for other exported functions:
VersionCommand
,StartCommand
,InitConfig
, andCreateErrHandler
.
88-91
: Enhance error messages with contextWhen an error occurs while retrieving the home path, the returned error lacks context. Wrapping errors with additional information makes debugging easier.
Apply this diff to improve the error handling:
if err != nil { - return err + return fmt.Errorf("failed to get home path: %w", err) }Consider applying similar error wrapping to other instances where errors are returned directly.
95-98
: Handle RPC client initialization errors gracefullyIf the RPC client fails to initialize, the error returned might lack context about which RPC URL caused the failure.
Improve the error message to include the problematic RPC URL:
if err != nil { - return err + return fmt.Errorf("failed to create RPC client for %s: %w", config.DefaultChain.RpcUrl, err) }icq-relayer/pkg/types/rpc.go (2)
123-129
: Handle JSON-RPC response errors consistently.In the
broadcastTX
helper function, consider checking the JSON-RPC response for potential error codes or messages returned by the server. Currently, only the error from theCall
method is checked. Enhancing error handling can provide more informative feedback to the callers.You might enhance the error handling like this:
_, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result) if err != nil { return nil, err +} else if result.Code != 0 { + return nil, fmt.Errorf("RPC error: %s", result.Log) } return result, nil
348-374
: Validate theorderBy
parameter inBlockSearch
.Similar to
TxSearch
, theBlockSearch
function should validate theorderBy
parameter to prevent potential errors due to invalid values.Consider adding validation for
orderBy
:+if orderBy != "asc" && orderBy != "desc" { + return nil, fmt.Errorf("invalid orderBy value: %s", orderBy) +}icq-relayer/pkg/types/client.go (1)
462-464
: Remove commented-out codeThe commented-out code at lines 462-464 can be removed to improve code readability and maintainability.
Apply this diff to remove the unused code:
- //logger.Log("msg", "caching currentblock", "height", currentheight) - //} else { - //logger.Log("msg", "using cached currentblock", "height", currentheight)icq-relayer/pkg/runner/run.go (4)
630-631
: Use consistent logging instead offmt.Println
The use of
fmt.Println
for logging is inconsistent with the rest of the codebase, which utilizes thelogger
for logging messages. This inconsistency can make it harder to manage and filter logs.Replace
fmt.Println
withlogger.Log
for consistency.- fmt.Println("flush messages", len(toSend)) + _ = logger.Log("msg", "flush messages", "count", len(toSend))
731-731
: Use consistent logging instead offmt.Printf
Similarly, using
fmt.Printf
is inconsistent with the logging approach in the rest of the code. Consistent logging practices improve readability and maintainability.Replace
fmt.Printf
withlogger.Log
.- fmt.Printf("prepared %d messages\n", len(list)) + _ = logger.Log("msg", "prepared messages", "count", len(list))
685-695
: Remove commented-out debug statements and unnecessaryfmt.Println
There are commented-out
fmt.Println
statements and an activefmt.Println
in theprepareMessages
function, which are inconsistent with the logging strategy.Clean up the commented code and replace
fmt.Println
withlogger.Log
if the log messages are necessary.- //fmt.Println("prepareMessages", idx) ... - fmt.Println("unable to cast message to MsgSubmitQueryResponse") + _ = logger.Log("msg", "Unable to cast message to MsgSubmitQueryResponse")🧰 Tools
🪛 GitHub Check: CodeQL
[notice] 687-687: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
447-448
: Improve error message for client ID retrieval failureThe error message when failing to get the client ID lacks context, making it harder to troubleshoot.
Enhance the error message to include more details.
- _ = logger.Log("msg", fmt.Sprintf("Error: Could not get client id %s", err)) + _ = logger.Log("msg", "Error: Could not get client ID", "error", err)
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
⛔ Files ignored due to path filters (7)
go.sum
is excluded by!**/*.sum
go.work
is excluded by!**/*.work
go.work.sum
is excluded by!**/*.sum
icq-relayer/go.sum
is excluded by!**/*.sum
third-party-chains/celestia-types/proto-types/proof.pb.go
is excluded by!**/*.pb.go
utils/proofs/proofs.pb.go
is excluded by!**/*.pb.go
x/interchainquery/types/query.pb.go
is excluded by!**/*.pb.go
📒 Files selected for processing (23)
- Makefile (2 hunks)
- go.mod (4 hunks)
- icq-relayer/.dockerignore (1 hunks)
- icq-relayer/Dockerfile (1 hunks)
- icq-relayer/Makefile (1 hunks)
- icq-relayer/cmd/config.go (1 hunks)
- icq-relayer/cmd/keys.go (0 hunks)
- icq-relayer/cmd/root.go (2 hunks)
- icq-relayer/cmd/run.go (1 hunks)
- icq-relayer/go.mod (7 hunks)
- icq-relayer/pkg/config/config.go (0 hunks)
- icq-relayer/pkg/runner/run.go (12 hunks)
- icq-relayer/pkg/types/client.go (1 hunks)
- icq-relayer/pkg/types/config.go (1 hunks)
- icq-relayer/pkg/types/rpc.go (1 hunks)
- proto/celestia/proof.proto (1 hunks)
- proto/quicksilver/interchainquery/v1/query.proto (2 hunks)
- proto/quicksilver/proofs/proofs.proto (1 hunks)
- third-party-chains/celestia-types/types/row_proof.go (1 hunks)
- third-party-chains/celestia-types/types/share_proof.go (1 hunks)
- utils/proofs/proofs.go (1 hunks)
- x/interchainstaking/keeper/callbacks.go (6 hunks)
- x/interchainstaking/keeper/callbacks_test.go (1 hunks)
💤 Files with no reviewable changes (2)
- icq-relayer/cmd/keys.go
- icq-relayer/pkg/config/config.go
✅ Files skipped from review due to trivial changes (1)
- icq-relayer/.dockerignore
🧰 Additional context used
🪛 GitHub Check: CodeQL
icq-relayer/cmd/run.go
[notice] 122-122: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinismicq-relayer/pkg/runner/run.go
[notice] 143-143: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[notice] 159-193: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[notice] 190-190: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[warning] 216-216: Calling the system time
Calling the system time may be a possible source of non-determinism
[notice] 270-270: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[notice] 341-341: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[warning] 593-593: Calling the system time
Calling the system time may be a possible source of non-determinism
[warning] 600-600: Calling the system time
Calling the system time may be a possible source of non-determinism
[notice] 612-617: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[warning] 648-648: Calling the system time
Calling the system time may be a possible source of non-determinism
[notice] 687-687: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
[notice] 712-712: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinismicq-relayer/pkg/types/client.go
[notice] 532-532: Floating point arithmetic
Floating point arithmetic operations are not associative and a possible source of non-determinism
🪛 buf
proto/celestia/proof.proto
4-4: import "tendermint/crypto/proof.proto": file does not exist
(COMPILE)
proto/quicksilver/proofs/proofs.proto
4-4: import "celestia/proof.proto": file does not exist
(COMPILE)
🪛 GitHub Check: devskim
third-party-chains/celestia-types/types/share_proof.go
[notice] 31-31: A "TODO" or similar was left in source code, possibly indicating incomplete functionality
Suspicious comment
🔇 Additional comments (37)
icq-relayer/Dockerfile (1)
18-18
: LGTM! Good addition of runtime dependencies.The addition of
jq
andcurl
makes sense for handling JSON data and making HTTP requests, which aligns well with the inter-chain query relayer's needs.proto/quicksilver/proofs/proofs.proto (2)
4-4
: Verify the Celestia proto import pathThe static analysis indicates that "celestia/proof.proto" cannot be found. This import is critical for the
CelestiaProof
message definition.Let's verify the proto file location:
✅ Verification successful
Import path "celestia/proof.proto" is correctly configured
The import path is valid and the proto file exists at the expected location
proto/celestia/proof.proto
. The file contains the necessary message definitions for Celestia proofs includingShareProof
,RowProof
, andNMTProof
that are required for the blockchain proof functionality.🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Search for celestia proof proto files fd -e proto -g '*proof.proto' | grep -i celestia # Check if the proto file might be in a third-party proto directory fd -t d -g '*proto*' | grep -i celestiaLength of output: 166
Script:
#!/bin/bash # Let's verify the content of the proto file to ensure it matches the import cat proto/celestia/proof.proto # Also check if there might be duplicate proto files in third-party directory fd -e proto -g '*proof.proto' third-party-chains/celestia-types/proto-types/Length of output: 2077
🧰 Tools
🪛 buf
4-4: import "celestia/proof.proto": file does not exist
(COMPILE)
15-17
: Document TendermintProof and verify interface implementation
- Add documentation to explain:
- The purpose of TendermintProof
- How it relates to CelestiaProof
- Their roles in the InclusionProof interface
Example documentation:
+// TendermintProof encapsulates proof data for transactions validated through Tendermint/Comet message TendermintProof { + // tx_proof contains the Tendermint-specific proof data for transaction verification tendermint.types.TxProof tx_proof = 1; }
- Verify that both proof types properly implement the InclusionProof interface:
✅ Verification successful
Let me gather more context about the CelestiaProof message to provide a complete verification.
Documentation needed but interface implementation verified
Add documentation to explain the purpose and relationship of both proof types:
+// CelestiaProof encapsulates proof data for transactions validated through Celestia +// using ShareProof for data availability sampling message CelestiaProof { celestia.core.v1.proof.ShareProof share_proof = 1; uint32 index = 2; } +// TendermintProof encapsulates proof data for transactions validated through Tendermint/Comet +// using the standard Merkle proof format from Tendermint message TendermintProof { tendermint.types.TxProof tx_proof = 1; }Both proof types correctly implement the InclusionProof interface with their respective Validate methods, as verified in utils/proofs/proofs.go.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Search for the InclusionProof interface implementation ast-grep --pattern 'type InclusionProof interface { $$$ Validate($$$) ([]byte, error) $$$ }' # Look for implementations rg -A 5 'func \(.*(?:Celestia|Tendermint)Proof\) Validate'Length of output: 1137
Script:
#!/bin/bash # Check CelestiaProof message definition cat proto/quicksilver/proofs/proofs.proto # Look for any existing documentation in the proto files rg -B 2 "message (?:Celestia|Tendermint)Proof" proto/Length of output: 874
proto/quicksilver/interchainquery/v1/query.proto (3)
9-9
: LGTM: Required import for the new proof_any field.
44-44
: LGTM: Clear deprecation notice for the proof field.The deprecation is properly marked with version 1.7.0, aligning with the breaking changes mentioned in the PR objectives.
47-48
: Enhance documentation for the proof_any field.While the field addition is correct, consider adding more detailed documentation about the expected content and format. This would help users understand that it should contain a proto-encoded message implementing the
utils/proofs.InclusionProof
interface.// proof_any is any wrapped proof + // Expected to contain a proto-encoded message implementing utils/proofs.InclusionProof + // Currently supports TendermintProof and CelestiaProof types google.protobuf.Any proof_any = 5;This change requires careful migration. Let's verify the impact:
✅ Verification successful
Let me gather more information about the proof types and their usage.
Let me check the implementation of InclusionProof interface and its usage.
Based on the gathered information, I can now provide the final response:
Documentation enhancement for proof_any field is accurate and well-aligned with implementation.
The review comment's suggestion to enhance documentation is correct and matches the codebase implementation:
- The field is used to wrap proofs implementing the
utils/proofs.InclusionProof
interface- The codebase currently supports both
TendermintProof
andCelestiaProof
types as mentioned- The proof validation logic is properly implemented in the
proofs.go
fileThe suggested documentation addition provides valuable context for users and accurately reflects the current implementation.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Search for direct usage of the deprecated 'proof' field rg -A 5 'GetTxWithProofResponse.*proof[^_]' # Search for potential unmigrated code that might break rg -A 5 'GetTxWithProofResponse.*TxProof'Length of output: 1031
Script:
#!/bin/bash # Search for proof types and their implementations ast-grep --pattern 'type $_ struct { $$$ proof $$$ $$$ }' # Look for Any proof usage and unmarshaling rg -A 5 'UnmarshalAny.*proof' # Check for proof interfaces and their implementations fd -e go -x grep -l "InclusionProof" {} \;Length of output: 267
Script:
#!/bin/bash # Check the InclusionProof interface definition cat utils/proofs/proofs.go # Look for proof_any field usage in the codebase rg -A 5 'proof_any' # Check the implementation in callbacks cat x/interchainstaking/keeper/callbacks.goLength of output: 33661
proto/celestia/proof.proto (3)
29-49
: LGTM! Well-documented NMT proof structure.The NMTProof message is well-structured and thoroughly documented, clearly explaining the purpose of each field and their byte size requirements.
7-7
: Verify the go_package path alignment with module structure.The go_package path references a 'third-party-chains' directory. Please verify this aligns with your module structure and dependency management strategy.
✅ Verification successful
Let me gather more information about the module structure to verify this alignment.
Let me check the contents of the third-party-chains directory to verify its structure.
The go_package path correctly aligns with the module structure
The
third-party-chains
directory exists in the repository root and contains the expectedcelestia-types/proto-types
subdirectory where the generated protobuf code is placed. The go_package pathgithub.com/quicksilver-zone/quicksilver/third-party-chains/celestia-types/proto-types
correctly matches this structure and follows the same pattern used by other proto files in the codebase (e.g., osmosis-types, umee-types).🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Check if the package path exists and is consistently used rg -g "*.proto" "github.com/quicksilver-zone/quicksilver/third-party-chains"Length of output: 3988
Script:
#!/bin/bash # Check for go.mod file and its content cat go.mod # Check if the third-party-chains directory exists and its structure fd "third-party-chains"Length of output: 18001
Script:
#!/bin/bash # List contents of third-party-chains directory ls -R third-party-chains/ # Check for any Go files in the celestia-types directory fd -e go . third-party-chains/celestia-typesLength of output: 3791
4-4
: Verify the tendermint/crypto/proof.proto import path.The static analysis indicates this import might not exist. Please ensure the correct import path is used and the proto file is available in your build environment.
🧰 Tools
🪛 buf
4-4: import "tendermint/crypto/proof.proto": file does not exist
(COMPILE)
icq-relayer/cmd/root.go (2)
72-72
: Verify the removal of key management functionality.The
keysCmd()
has been commented out, which removes key management functionality. Since this is a breaking change and the relayer needs to handle proofs, we should verify that key management has been implemented differently.Let's check for alternative key management:
✅ Verification successful
Key management is now handled internally through ChainConfig
The removal of
keysCmd()
is valid as the key management functionality is now implemented directly in theChainConfig
type. The relayer uses an in-memory keyring with the following features:
- Private key generation and management via
GetPrivKeyAddress()
- Transaction signing using the generated keys
- Memory-backed keyring implementation (
keyring.BackendMemory
)This is actually a more secure approach as it eliminates the need for manual key management commands and handles keys programmatically within the application lifecycle.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Description: Look for new key management implementation # Expected: Find new key handling code or documentation # Search for key-related functionality rg -g '!vendor/*' -g '*.go' 'func.*(Key|key)' ./icq-relayer # Look for key management in documentation fd -g 'README*' -g 'CHANGELOG*' -X rg -l 'key management|keys' {}Length of output: 442
Script:
#!/bin/bash # Let's do a more comprehensive search for key-related functionality # Search for key-related code in the entire codebase rg -g '!vendor/*' -g '*.go' -i '(key|keys|keyring|privkey|pubkey)' ./icq-relayer # Check for any configuration related to keys fd -e yaml -e toml -X rg -l 'key|keys|keyring' {} # Look for key imports and types rg -g '!vendor/*' -g '*.go' 'crypto.*Key|keyring' ./icq-relayerLength of output: 4367
48-54
: Verify the new configuration management approach.The removal of
PersistentPreRunE
suggests that configuration initialization has changed. Since this is part of a breaking change release (v1.7.x), we should ensure that the new approach is properly documented and implemented.Let's check for new configuration handling:
✅ Verification successful
Let me gather more specific information about the new configuration approach.
Configuration management has been updated and is properly implemented
The commented-out
PersistentPreRunE
is correct as the codebase has moved from a YAML-based configuration using Viper to a TOML-based configuration system. The new implementation:
- Uses
InitConfig()
inrun.go
which callstypes.InitializeConfigFromToml()
- Provides default configuration via
NewConfig()
- Supports both file-based config and sensible defaults
- Is documented in README.md with clear path information (
$HOME/.icq/config
)
Key improvements in the new system:
- More structured configuration with strong typing
- Built-in defaults for mainnet compatibility
- Simpler initialization process
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Description: Look for new configuration management implementation # Expected: Find new configuration handling code # Search for potential new config implementations rg -g '!vendor/*' -g '*.go' 'func.*[Cc]onfig' ./icq-relayer # Look for documentation about configuration fd -g 'README*' -X rg -l 'configuration|config' {}Length of output: 4946
Script:
#!/bin/bash # Check the new configuration implementation in run.go and config.go rg -A 10 'func InitConfig' ./icq-relayer/cmd/run.go cat ./icq-relayer/cmd/config.go # Check the configuration types and initialization cat ./icq-relayer/pkg/types/config.go # Look for configuration documentation cat ./icq-relayer/README.mdLength of output: 6868
icq-relayer/pkg/types/config.go (1)
16-26
: Verify configuration support for new proof format.Given that this PR introduces breaking changes related to Celestia Inclusion proofs, ensure that the configuration structure supports the new
proof_any
field and related functionality.
✅ Verification successful
Based on the search results, I can now generate the final response:
Configuration structure aligns with new proof format
The codebase shows proper implementation of the new proof format:
ProofAny
field is correctly used in GetTxWithProofResponse
- Both
CelestiaProof
andTendermintProof
implementations are present
- No usage of deprecated proof fields was found
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Search for proof-related types and fields in the codebase echo "Searching for proof-related types and fields..." rg -t go "proof.*Any|InclusionProof|CelestiaProof|TendermintProof" --glob '!vendor/*' # Check for potential uses of the deprecated 'proof' field echo "Checking for uses of deprecated 'proof' field..." rg -t go "GetTxWithProofResponse.*proof[^_]" --glob '!vendor/*'Length of output: 7048
icq-relayer/go.mod (3)
197-198
: LGTM: Replace directives are properly configuredThe replace directives are correctly set up:
- Local path for quicksilver development
- Using the maintained CometBFT fork of Tendermint
42-43
: Verify Celestia dependencies integrationThe addition of Celestia-related packages aligns with the PR objectives for handling Celestia Inclusion proofs. However, these are marked as indirect dependencies.
#!/bin/bash # Description: Verify Celestia dependency usage # Look for direct usage of Celestia packages # Check for imports of Celestia packages rg -l "github\.com/celestiaorg" # Look for Celestia-related types or functions rg "CelestiaProof|ShareProof"
3-3
: Verify Go version compatibility across environmentsThe upgrade to Go 1.23.2 is a significant change. Ensure all development and deployment environments support this version.
Makefile (1)
41-41
: LGTM: Addition ofosusergo
build tagThe
osusergo
build tag is a good addition as it ensures the binary uses the OS's native user/group functions, which improves security and reduces binary size.
go.mod (4)
9-10
: New Celestia dependencies align with PR objectives.The addition of Celestia-related dependencies:
github.com/celestiaorg/go-square/v2 v2.0.0
github.com/celestiaorg/nmt v0.22.2
These additions align with the PR objectives for handling Celestia Inclusion proofs.
39-39
: Updated Google API dependencies.The following Google API dependencies have been updated to the latest versions:
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/rpc
These updates align with the dependency ecosystem.
Also applies to: 336-336
316-319
: OpenTelemetry dependencies have been updated.The OpenTelemetry dependencies have been updated to version 1.28.0:
go.opentelemetry.io/otel
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/trace
This update might require changes to existing telemetry implementations.
#!/bin/bash # Check for OpenTelemetry usage that might need updates rg -l 'opentelemetry|otel'
3-3
: Verify Go version compatibility with deployment environments.The Go version has been upgraded from 1.22.4 to 1.23.2. This is a significant version jump that might affect deployment environments.
icq-relayer/cmd/config.go (1)
9-50
:⚠️ Potential issue
Assess the impact of commenting out the configuration initialization logic
The entire body of the
initConfig
function has been commented out, effectively disabling the configuration loading, unmarshalling, and validation processes. This may lead to the application using default values or encountering unexpected behavior due to missing configurations. Please verify whether the application can function correctly without this configuration initialization. If the configuration is no longer required, consider removing the commented-out code to keep the codebase clean.
Run the following script to check for any usage of the
cfg
variable throughout the codebase:
icq-relayer/cmd/run.go (1)
122-122
: Previous concern about spawning a goroutine remainsThe earlier review comment about potential non-determinism due to spawning a goroutine is still applicable. Ensure thread safety and proper synchronization within the
runner.Run
function.🧰 Tools
🪛 GitHub Check: CodeQL
[notice] 122-122: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinism
icq-relayer/pkg/types/rpc.go (2)
1-410
: Overall code quality is good.The implementation provides comprehensive coverage of the Tendermint RPC client interfaces, with consistent error handling and usage of contexts for cancellation and timeouts.
263-273
: Ensure correct encoding for the hash
parameter. In the
BlockByHash
function, the hash
parameter is passed as a byte slice. Confirm that the hash is appropriately encoded (e.g., hex-encoded string) as expected by the RPC server to prevent errors.Run the following script to check how
BlockByHash
is called:#!/bin/bash # Description: Find all calls to 'BlockByHash' and check hash encoding. # Test: Search for 'BlockByHash' function calls. rg 'BlockByHash\(' icq-relayer/icq-relayer/pkg/runner/run.go (1)
612-617
: Confirm non-determinism is acceptable when spawning goroutinesSpawning goroutines inside loops, such as in this block, can lead to non-deterministic execution order, which might be problematic in certain contexts.
Ensure that non-determinism introduced by spawning goroutines does not adversely affect the application's correctness.
🧰 Tools
🪛 GitHub Check: CodeQL
[notice] 612-617: Spawning a Go routine
Spawning a Go routine may be a possible source of non-determinismx/interchainstaking/keeper/callbacks.go (12)
32-32
: Import proofs
package
The import
"github.com/quicksilver-zone/quicksilver/utils/proofs"
is necessary for handling InclusionProof
. This addition is appropriate.
461-463
: Retrieve consensus state using header.TrustedHeight
The consensus state is now fetched using
header.TrustedHeight
, which ensures the correct state is retrieved for validation.
477-477
: Validate Tendermint state with checkTMStateValidity
The updated call to
checkTMStateValidity
includes header
and the current block time, which is appropriate for state validation.
505-508
: Add error handling for unmarshalling arguments
Proper error handling and logging have been added when unmarshalling
args
intores
. This enhances debuggability.
510-511
: Initialize txBytes
variable
The declaration of
var txBytes []byte
initializes the variable for later use in processing the transaction bytes.
512-513
: Unpack ProofAny
into InclusionProof
Unpacking
res.ProofAny
into inclusionProof
allows for validation of the inclusion proof, which is necessary for transaction verification.
518-521
: Validate inclusion proof and obtain transaction bytesThe inclusion proof is validated using
res.Header.Header.DataHash
, andtxBytes
are extracted. Error handling is appropriately managed.
523-524
: Compute transaction hash from txBytes
Calculating the transaction hash using
tmhash.Sum(txBytes)
ensures the integrity of the transaction data.
527-527
: Unmarshal query.Request
into GetTxRequest
Unmarshalling
query.Request
into queryRequest
retrieves the expected transaction hash for comparison.
543-543
: Verify client state with updated headerThe call to
CheckTMHeaderForZone
now passesres.GetHeader()
, which is consistent with the updated method signature and ensures client state validity.
547-547
: Decode transaction using TxDecoder
The transaction bytes are decoded using
TxDecoder(k.cdc)
, and errors are correctly handled, facilitating further processing of the transaction.
450-450
: Update CheckTMHeaderForZone
method signature
The method
CheckTMHeaderForZone
now accepts a *tmclienttypes.Header
instead of icqtypes.GetTxWithProofResponse
, which aligns with the updated handling of headers. Ensure all invocations of
CheckTMHeaderForZone
are updated to match the new signature. You can verify this by running:
✅ Verification successful
Based on the search results, I can generate the final response as we have found all the invocations of
CheckTMHeaderForZone
:✅ All invocations of
CheckTMHeaderForZone
are correctly updatedThe method signature change is properly reflected across all usages:
- In
callbacks.go
: Called withres.GetHeader()
which returns a*tmclienttypes.Header
- In
callbacks_test.go
: Called withtxRes.GetHeader()
which returns a*tmclienttypes.Header
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Description: Verify all calls to `CheckTMHeaderForZone` pass the correct arguments. # Expected: All calls to `CheckTMHeaderForZone` pass a `*tmclienttypes.Header` as the third parameter. rg 'CheckTMHeaderForZone\([^\)]*\)' -A 3Length of output: 1396
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 9
🧹 Outside diff range and nitpick comments (5)
.gitignore (1)
29-29
: Consider using a more flexible pattern for cross-platform compatibility.While the current pattern works, consider using a more flexible pattern that would work across platforms and different build configurations:
-icq-relayer/icq-relayer +icq-relayer/icq-relayer*This would ensure the pattern catches the binary regardless of the platform-specific extensions (e.g.,
.exe
on Windows).icq-relayer/Makefile (1)
1-20
: Add prerequisites check.Consider adding a check for required tools (git, docker) to provide early feedback if they're missing.
Add this at the beginning of the Makefile:
+REQUIRED_TOOLS := git docker + +.PHONY: check-prerequisites +check-prerequisites: + $(foreach tool,$(REQUIRED_TOOLS),\ + $(if $(shell command -v $(tool) 2>/dev/null),,\ + $(error "$(tool) is required but not installed."))) + +build docker dockerx docker-push: check-prerequisites + .PHONY: buildicq-relayer/pkg/runner/run.go (1)
441-446
: Enhance error handling for Tx retrieval.The error messages in the Tx retrieval case could be more informative by including the hash and chain ID for better debugging.
- if err != nil { - _ = logger.Log("msg", fmt.Sprintf("Error: Could not fetch proof %s", err)) + if err != nil { + _ = logger.Log("msg", fmt.Sprintf("Error: Could not fetch proof for tx %s on chain %s: %v", req.GetHash(), query.ChainId, err)) return }icq-relayer/cmd/run.go (2)
63-63
: Use command's output methods instead offmt.Printf
At line 63, you're using
fmt.Printf
to print the configuration file creation message. Consider usingcmd.Printf
orcmd.Println
provided by the Cobra framework. This ensures that the output respects any command-line output settings and maintains consistency across your CLI application.
75-79
: Use command's output methods instead offmt.Printf
In the
VersionCommand
function (lines 75-79), you're usingfmt.Printf
to display version information. It's better to usecmd.Printf
orcmd.Println
to ensure consistency with the Cobra framework's output handling.
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
⛔ Files ignored due to path filters (2)
go.work.sum
is excluded by!**/*.sum
icq-relayer/go.sum
is excluded by!**/*.sum
📒 Files selected for processing (6)
- .gitignore (1 hunks)
- icq-relayer/Dockerfile (2 hunks)
- icq-relayer/Makefile (1 hunks)
- icq-relayer/cmd/run.go (1 hunks)
- icq-relayer/go.mod (6 hunks)
- icq-relayer/pkg/runner/run.go (12 hunks)
🔇 Additional comments (14)
icq-relayer/Dockerfile (1)
17-17
: Verify the existence of build target in Makefile.The build process now relies on a
make build
target. Let's ensure this target exists and is properly configured.✅ Verification successful
The
build
target exists and is properly configuredThe Makefile contains a well-defined build target that:
- Is part of the
BUILD_TARGETS := build install
declaration- Sets the output directory via
build: BUILD_ARGS=-o $(BUILDDIR)/
- Inherits the common build flags and checks including:
- Go version verification
- Proper build tags and ldflags
- Module verification via go.sum
- Output to the specified build directory
The build target is properly integrated into the build system and will work as expected in the Dockerfile.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Check if Makefile exists and contains build target if [ -f "Makefile" ]; then echo "Checking build target in Makefile:" grep -A 5 "^build:" Makefile else echo "Makefile not found" fiLength of output: 292
Script:
#!/bin/bash # Let's check the complete Makefile content to understand the build setup cat MakefileLength of output: 16380
icq-relayer/Makefile (4)
7-9
: LGTM! Well-structured ldflags definition.The ldflags are correctly structured to embed version information into the binary.
11-12
: LGTM! Build target correctly uses ldflags.The build target is properly updated to include version information.
16-17
: Previous review comments about multi-platform support are still applicable.
19-20
: Previous review comments about adding dockerx-push target are still applicable.icq-relayer/go.mod (3)
15-15
: LGTM: Quicksilver version aligns with requirementsThe update to quicksilver v1.7.0-rc0 aligns with the PR objectives requiring v1.7.0+ for Celestia Inclusion proofs support.
42-43
: LGTM: Celestia dependencies added for proof handlingThe addition of Celestia-related dependencies aligns with the PR objectives for handling Celestia Inclusion proofs:
- github.com/celestiaorg/go-square/v2
- github.com/celestiaorg/nmt
6-7
: Verify usage of new dependenciesSeveral new dependencies have been added:
- cosmossdk.io/math: Enhanced math operations
- github.com/BurntSushi/toml: Configuration handling
- github.com/mitchellh/go-homedir: Home directory handling
- github.com/rs/zerolog: Structured logging
Let's verify their usage in the codebase.
#!/bin/bash # Check usage of new dependencies echo "Checking math package usage:" rg -A 3 'cosmossdk\.io/math' --type go echo "Checking TOML config usage:" rg -A 3 'github\.com/BurntSushi/toml' --type go echo "Checking homedir usage:" rg -A 3 'github\.com/mitchellh/go-homedir' --type go echo "Checking zerolog usage:" rg -A 3 'github\.com/rs/zerolog' --type goAlso applies to: 13-13, 16-16
icq-relayer/pkg/runner/run.go (2)
45-54
: LGTM: New types align with PR objectives.The new types
ClientUpdateRequirement
andMessage
provide a clean structure for handling client updates and message routing, which aligns well with the PR's focus on improving proof handling.
737-746
: LGTM: Clean implementation of cleanup function.The
Close
function properly handles unsubscription and client shutdown with appropriate error handling.icq-relayer/cmd/run.go (4)
104-115
: LGTM!The client context is properly configured with all necessary components and settings. This setup ensures that the application has the correct codec, interface registry, and other configurations required for operation.
118-120
: Consider using a cancellable context for graceful shutdownCurrently,
context.Background()
is used at line 118, which cannot be canceled. To enable graceful shutdown ofrunner.Run
, consider usingcontext.WithCancel
to create a cancellable context. This allows you to signal cancellation and ensure that all goroutines and processes are properly terminated upon receiving a shutdown signal.
138-141
: Handle errors during configuration initializationIn the
InitConfig
function (lines 138-141), errors fromtypes.InitializeConfigFromToml(homepath)
are not handled. If an error occurs during configuration loading, it could lead to nil pointers or runtime panics later. Consider modifyingInitializeConfigFromToml
to return an error and handle it appropriately to ensure robust error handling.
143-147
: Avoid sending signals directly in error handlerUsing
sigC <- syscall.SIGABRT
within theCreateErrHandler
function (lines 143-147) may lead to unintended behavior or complications with signal handling. Consider invoking a cancellation function or closing channels to signal shutdown more gracefully.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 4
🧹 Outside diff range and nitpick comments (3)
.github/workflows/icq-relayer-build-test.yml (1)
Workflow triggers for icq-relayer need adjustment
The current path pattern
icq-relayer/**
in the workflow triggers may miss some important changes. The analysis shows that icq-relayer is referenced in:
go.work
: Contains workspace configuration for the icq-relayer moduledocker-compose.yml
: Contains icq-relayer service configurationsscripts/vars.sh
: Contains icq-relayer related environment variables and commandsConsider updating the workflow triggers to include these files:
on: push: paths: - 'icq-relayer/**' - 'go.work' - 'docker-compose.yml' - 'scripts/vars.sh' pull_request: paths: - 'icq-relayer/**' - 'go.work' - 'docker-compose.yml' - 'scripts/vars.sh'🔗 Analysis chain
Line range hint
1-37
: Verify workflow triggers for icq-relayer changesThe workflow configuration looks good overall, with appropriate triggers and matrix testing across different OS environments.
Let's verify the path patterns to ensure we're catching all relevant icq-relayer changes:
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Description: Verify all icq-relayer related files are under the specified path # Test: Check if there are any icq-relayer related files outside the icq-relayer directory rg -l "icq.*relayer" --type-add 'go:*.{go,mod,sum}' -g '!icq-relayer/**'Length of output: 117
Script:
#!/bin/bash # Let's check the content of these files to understand the icq-relayer references echo "=== Content of go.work ===" cat go.work echo -e "\n=== Content of docker-compose.yml ===" cat docker-compose.yml echo -e "\n=== Content of scripts/vars.sh ===" cat scripts/vars.shLength of output: 10607
.github/workflows/golangci.yml (1)
26-26
: Consider using a floating patch version.While pinning to a specific version (1.23.2) ensures reproducible builds, using a floating patch version like "1.23.x" would automatically include security fixes while maintaining API compatibility.
Apply this diff to use a floating patch version:
- go-version: "1.23.2" + go-version: "1.23.x".github/workflows/buildtest.yaml (1)
Line range hint
73-98
: Consider enhancing test coverage for breaking changes.Given that this release (v1.7.x) introduces breaking changes in Celestia Inclusion proofs:
- Consider adding specific test coverage for the new
proof_any
field andInclusionProof
interface- The current coverage exclusions are appropriate, but you might want to ensure new proof-related code is not accidentally excluded
Add a comment to clarify which specific paths should be excluded from coverage, especially for new proof-related code. For example:
excludelist="$(find ./ -type f -name '*.go' | xargs grep -l 'DONTCOVER')" excludelist+=" $(find ./ -type f -name '*.pb.go')" excludelist+=" $(find ./ -type f -name '*.pb.gw.go')" excludelist+=" $(find ./ -type f -path './test/*.go')" excludelist+=" $(find ./ -type f -path './third-party-chains/*.go')" +# Note: Ensure new proof-related code under x/interchainquery/types is covered
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
📒 Files selected for processing (10)
- .github/workflows/buildtest.yaml (3 hunks)
- .github/workflows/codeql.yml (1 hunks)
- .github/workflows/docker.yaml (1 hunks)
- .github/workflows/golangci.yml (1 hunks)
- .github/workflows/icq-relayer-build-test.yml (2 hunks)
- .github/workflows/simulation.yml (1 hunks)
- Dockerfile (2 hunks)
- Dockerfile.relayer (0 hunks)
- docker-compose.yml (0 hunks)
- proto/Dockerfile (1 hunks)
💤 Files with no reviewable changes (2)
- Dockerfile.relayer
- docker-compose.yml
🔇 Additional comments (8)
proto/Dockerfile (1)
2-2
:⚠️ Potential issue
⚠️ Avoid using Go 1.23.2 in production codeThe base image has been updated to use Go 1.23.2, which is currently in beta/release candidate stage and not recommended for production use, especially in a release branch. This could introduce instability in the build environment.
Consider using the latest stable version instead:
-FROM golang:1.23.2-alpine3.20 +FROM golang:1.22.4-alpine3.19Let's verify the Go version status:
.github/workflows/docker.yaml (1)
29-29
:⚠️ Potential issueCritical: Invalid Go version specified
The specified Go version
1.23.2
does not exist. As of April 2024, the latest stable version of Go is 1.22.4.Apply this change to use the latest stable version:
- go-version: "1.23.2" + go-version: "1.22.4"Dockerfile (2)
25-25
: LGTM: Base image updateThe update to Alpine 3.20 is appropriate and maintains consistency with the builder stage.
Line range hint
1-31
: Verify build requirements for Celestia proof handlingGiven that this release focuses on Celestia Inclusion proofs and introduces breaking changes in proof handling, please verify if any additional build dependencies or configurations are required.
✅ Verification successful
Let me gather more information about the Celestia-related code and build requirements.
Let me analyze the Dockerfile and codebase for Celestia proof handling requirements.
Let me check if there are any special build configurations needed for Celestia proof handling.
Based on the analysis of the codebase, I can now provide the final response:
No additional build requirements needed for Celestia proof handling
The Celestia proof handling is already properly integrated through standard Go dependencies:
- Required dependencies are properly specified in go.mod:
github.com/celestiaorg/go-square/v2
andgithub.com/celestiaorg/nmt
- No special build tags or compile-time configurations are needed
- Proof handling code is implemented as standard Go packages with no special build requirements
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Check for Celestia-related dependencies in go.mod rg -A 5 "celestia" go.mod # Look for any build tags or constraints related to proofs rg -g '*.go' -B 2 "//\s*\+build"Length of output: 498
Script:
#!/bin/bash # Check for Celestia imports and usage in the codebase rg -g '*.go' -B 2 -A 2 "celestia" # Check for proof-related code and dependencies rg -g '*.go' -B 2 -A 2 "proof|Proof" # Check if there are any build-specific make targets cat MakefileLength of output: 238313
Script:
#!/bin/bash # Check for Celestia-related build dependencies in go.mod rg -A 2 "github.com/celestiaorg" go.mod # Check for any build tags or constraints related to Celestia rg -g '*.go' "//\s*\+build" # Look for Celestia-related code that might require special build setup rg -g '*.go' "celestia" utils/proofs/Length of output: 861
Script:
#!/bin/bash # Check for any Celestia-specific build flags or tags in Makefile rg "celestia|proof" Makefile # Check for any Celestia-specific build configurations in build scripts fd -t f -e sh . | xargs rg "celestia|proof" # Look for any Celestia-specific initialization or setup code rg -g '*.go' "func init.*celestia|func New.*celestia"Length of output: 282
.github/workflows/golangci.yml (1)
26-26
: Verify Go version consistency across the repository.The Go version update to 1.23 is significant and may require updates to the codebase. Let's verify that this version is consistently specified across the repository.
✅ Verification successful
Go version 1.23.2 is consistently specified across the repository
The verification shows that Go version 1.23.2 is consistently specified in:
go.mod
:go 1.23.2
- All GitHub workflow files:
.github/workflows/golangci.yml
.github/workflows/icq-relayer-build-test.yml
.github/workflows/docker.yaml
.github/workflows/codeql.yml
.github/workflows/buildtest.yaml
.github/workflows/simulation.yml
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Description: Check Go version specifications across the repository # Expected: All Go version references should specify 1.23 echo "Checking Go version in go.mod:" rg -l "^go 1\." go.mod echo -e "\nChecking Go version in GitHub workflows:" rg -l "go-version.*1\." .github/workflows/ echo -e "\nChecking Go version requirements in Makefile:" rg "GO.*1\." Makefile echo -e "\nChecking for potential Go version constraints in go.mod:" rg "require.*go.*1\." go.modLength of output: 736
Script:
#!/bin/bash # Let's check the actual Go versions specified in these files echo "Go version in go.mod:" cat go.mod | grep "^go " echo -e "\nGo versions in GitHub workflows:" echo "=== golangci.yml ===" cat .github/workflows/golangci.yml | grep "go-version" echo "=== icq-relayer-build-test.yml ===" cat .github/workflows/icq-relayer-build-test.yml | grep "go-version" echo "=== docker.yaml ===" cat .github/workflows/docker.yaml | grep "go-version" echo "=== codeql.yml ===" cat .github/workflows/codeql.yml | grep "go-version" echo "=== buildtest.yaml ===" cat .github/workflows/buildtest.yaml | grep "go-version" echo "=== simulation.yml ===" cat .github/workflows/simulation.yml | grep "go-version"Length of output: 1218
.github/workflows/codeql.yml (1)
Line range hint
32-33
: LGTM: Enhanced security checks with additional CodeQL queriesThe addition of experimental queries for detecting:
- Defer statements in loops
- Unsafe package misuse
- Division by zero vulnerabilities
These are valuable additions that will help catch potential security issues in the codebase, particularly important for blockchain applications.
.github/workflows/buildtest.yaml (2)
Line range hint
108-121
: Review simulation testing strategy for breaking changes.The simulate job is currently only triggered with the 'run-sim' label. Given the breaking changes in this release regarding Celestia Inclusion proofs:
- Consider making simulation tests mandatory for PRs that modify proof-related code
- The current caching strategy is good for build performance
#!/bin/bash # Description: Check if simulation tests exist for proof-related functionality echo "Checking for proof-related simulation tests..." rg -l "simulation.*test.*proof" test/Consider updating the job trigger condition to also run on changes to proof-related files:
if: contains(github.event.pull_request.labels.*.name, 'run-sim') || contains(github.event.pull_request.files.*.filename, 'x/interchainquery/types')
38-38
: Verify compatibility with Go 1.23.2 upgrade.The upgrade to Go 1.23.2 is a significant version jump that warrants careful consideration:
- Ensure all dependencies are compatible with Go 1.23.2
- Verify that this version upgrade is coordinated with the mentioned workflow files (codeql.yml, docker.yaml, golangci.yml, icq-relayer-build-test.yml)
Consider adding a comment in the workflow file explaining the rationale for upgrading to Go 1.23.2, especially since this is part of a release that introduces breaking changes.
Also applies to: 69-69, 120-120
✅ Verification successful
Go version 1.23.2 upgrade is consistent across workflows
The Go version upgrade to 1.23.2 is consistently applied across all relevant workflow files that use Go:
- buildtest.yaml
- codeql.yml
- docker.yaml
- golangci.yml
- icq-relayer-build-test.yml
- simulation.yml
Additionally, the go.mod files confirm that the project has been updated to use Go 1.23.2 as the base version. The upgrade appears to be a coordinated effort across the entire codebase.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Description: Verify Go version consistency across workflow files and check go.mod # for potential compatibility issues with Go 1.23.2 echo "Checking Go version consistency across workflow files..." rg -l "go-version.*1\.23\.2" .github/workflows/ echo -e "\nChecking go.mod for Go version and dependencies..." if [ -f "go.mod" ]; then cat go.mod | grep -E "^go |^require " fiLength of output: 237
Script:
#!/bin/bash # Let's verify the Go version changes and dependencies more thoroughly echo "Checking all Go version references in workflow files..." rg "go-version" .github/workflows/ echo -e "\nChecking if go.mod exists and its content..." find . -name "go.mod" -type f -exec cat {} \; echo -e "\nChecking other workflow files for Go setup..." find .github/workflows -type f -exec grep -l "setup-go" {} \;Length of output: 29152
Script:
#!/bin/bash # Let's verify the Go version in all workflow files that use setup-go echo "Checking Go version in all workflow files..." for file in .github/workflows/*.{yml,yaml}; do if [ -f "$file" ]; then echo -e "\n=== $file ===" grep -A 1 "setup-go@" "$file" fi doneLength of output: 2973
This pull request has been automatically marked as stale because it has not had any recent activity. It will be closed if no further activity occurs. Thank you! |
* register inclusion proof implementations * handle compat mode properly * fix issue with proof reconstruction from non-zero index * ensure we unmarshal the result for default tx types * guard against panic in epoch hook for brand new zone * fix bug in claims manager if local chain id has multiple hyphens
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 7
🧹 Outside diff range and nitpick comments (11)
x/interchainstaking/types/codec.go (1)
51-52
: Important architectural change for proof handling.The registration of proof interfaces is essential for the new Celestia Inclusion proof system. This change supports the breaking change where
proof
is replaced withproof_any
of typeAny
, allowing for different proof implementations (TendermintProof and CelestiaProof) through theInclusionProof
interface.Note: This requires icq-relayer version 1.1.0+ for compatibility with quicksilver version 1.7.0+.
x/claimsmanager/keeper/keeper.go (2)
56-56
: Enhance error message clarity.While including the chain ID is helpful, the error message could be more descriptive about the parsing failure.
Consider this improvement:
- k.Logger(ctx).Error("Error getting revision number for client ", "chainID", ctx.ChainID()) + k.Logger(ctx).Error("Failed to parse revision number from chain ID", "chainID", ctx.ChainID(), "error", err.Error())
53-56
: Consider version compatibility implications.Given that this PR introduces breaking changes for proof handling and requires icq-relayer v1.1.0+, ensure that the consensus state handling remains compatible with both old and new relayer versions during the transition period.
Consider:
- Adding version checks or graceful fallbacks
- Documenting the minimum compatible versions in the module's README
- Including upgrade instructions for operators
utils/proofs/proofs.go (1)
56-62
: Consider using bytes.Equal for hash comparison.Using
strings.EqualFold
for hex-encoded hash comparison is inefficient. Consider comparing the byte slices directly.for _, tx := range txs { hash := tmhash.Sum(tx) - hashStr := hex.EncodeToString(hash) - if strings.EqualFold(hashStr, txHash) { + expectedHash, err := hex.DecodeString(txHash) + if err != nil { + return nil, fmt.Errorf("invalid transaction hash format: %w", err) + } + if bytes.Equal(hash, expectedHash) { return tx, nil } }x/interchainstaking/keeper/hooks.go (1)
32-34
: LGTM! Consider adding a comment explaining the early return. The nil check is a good defensive programming practice that prevents potential panics. However, adding a brief comment would help explain why we skip zones without delegation addresses.

    if zone.DelegationAddress == nil {
+       // Skip zones without delegation addresses as they are not yet fully initialized
        return false
    }
utils/proofs/proofs_test.go (2)
49-49
: Remove redundant error check.This error check is redundant as there's no error being returned in the previous lines.
- require.NoError(t, err, "i", i)
1-58
: Consider enhancing test coverage for the breaking change. Given that this PR introduces a breaking change in proof handling, consider adding the following test scenarios:
- Migration tests to verify handling of old proof format
- Tests for invalid proof formats
- Tests for version compatibility checks
- Integration tests with the ICQ relayer
This will help ensure a smooth transition for users upgrading from previous versions.
icq-relayer/pkg/types/client.go (3)
469-469
: Make cache TTL configurable. The cache TTL is hardcoded to 6 seconds. Consider making this configurable through the chain config.
Apply this diff:
+ // Add to ChainConfig struct + CacheBlockTTL time.Duration `toml:"cache_block_ttl"` - cache.SetWithTTL("currentblock/"+r.ChainID, currentheight, 1, 6*time.Second) + cache.SetWithTTL("currentblock/"+r.ChainID, currentheight, 1, c.CacheBlockTTL)
213-314
: Refactor the Tx method for better maintainability. The `Tx` method is quite long and handles multiple responsibilities. Consider splitting it into smaller, focused functions.

Suggested structure:
- Extract HTTP request handling into `sendTxRequest`
- Extract proof handling into `handleCelestiaProof` and `handleTendermintProof`
- The main `Tx` method orchestrates these operations

Example refactor:
+func (r *ReadOnlyChainConfig) sendTxRequest(hash []byte) (*jsonrpctypes.RPCResponse, error) { + // Extract HTTP request logic here +} +func (r *ReadOnlyChainConfig) handleCelestiaProof(result []byte) (*codectypes.Any, int, error) { + // Extract Celestia proof handling logic here +} +func (r *ReadOnlyChainConfig) handleTendermintProof(result []byte) (*codectypes.Any, int, error) { + // Extract Tendermint proof handling logic here +}
316-327
: Add documentation for critical types. The `TxResultMinimal` and `TxResultMinimalCelestia` types lack proper documentation explaining their purpose and usage.

Add comprehensive documentation:
+// TxResultMinimal represents a minimized transaction result compatible with both Tendermint 0.34 and 0.37. +// It only contains the Height and Proof fields, omitting the Tx object which is already available in the TxProof. type TxResultMinimal struct { Height string `json:"height"` Proof tmtypes.TxProof `json:"proof"` } +// TxResultMinimalCelestia represents a Celestia-specific transaction result containing a ShareProof +// instead of a standard Tendermint TxProof, along with the transaction's index in the block. type TxResultMinimalCelestia struct { Height string `json:"height"` Proof celestiatypes.ShareProof `json:"proof"` Index uint32 `json:"index"` }x/interchainstaking/keeper/callbacks.go (1)
509-514
: Consider adding detailed error context. When unpacking the Any type fails, consider including more context in the error message to aid debugging.
- err := k.cdc.UnpackAny(res.ProofAny, &inclusionProof) - if err != nil { - return err - } + err := k.cdc.UnpackAny(res.ProofAny, &inclusionProof) + if err != nil { + return fmt.Errorf("failed to unpack inclusion proof: %w", err) + }
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
⛔ Files ignored due to path filters (2)
go.sum
is excluded by!**/*.sum
go.work.sum
is excluded by!**/*.sum
📒 Files selected for processing (8)
go.mod
(12 hunks)icq-relayer/pkg/types/client.go
(1 hunks)utils/proofs/proofs.go
(1 hunks)utils/proofs/proofs_test.go
(1 hunks)x/claimsmanager/keeper/keeper.go
(1 hunks)x/interchainstaking/keeper/callbacks.go
(6 hunks)x/interchainstaking/keeper/hooks.go
(1 hunks)x/interchainstaking/types/codec.go
(2 hunks)
🔇 Additional comments (17)
x/interchainstaking/types/codec.go (1)
11-11
: Verify the proofs package integration across the module.
The addition of the proofs package import aligns with the breaking change described in the PR objectives.
Let's verify the integration of the proofs package:
✅ Verification successful
Let me gather more specific information about the proofs package integration.
Based on the results, I can now generate the final response:
The proofs package integration appears to be correctly implemented
The import of quicksilver/utils/proofs
is properly integrated across the codebase:
- Used in
x/interchainstaking/keeper/callbacks.go
for proof validation - Referenced in test files indicating proper test coverage
- No lingering old proof implementations that could conflict with the new system
The search results show that the breaking change from proof
to proof_any
is being properly implemented, with the new package being used in the appropriate locations for handling Celestia inclusion proofs.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash
# Description: Verify the proofs package integration and usage
# Expected: Find references to the new proof system across the module
# Check for any remaining references to the old proof field
echo "Checking for old proof field usage..."
rg -g '*.go' -A 3 'proof\s+[^_]' ./x/
# Check for new proof_any field usage
echo "Checking for new proof_any field usage..."
rg -g '*.go' -A 3 'proof_any' ./x/
# Check for InclusionProof interface implementations
echo "Checking for InclusionProof implementations..."
ast-grep --pattern 'type $_ struct {
$$$
}
func ($_ $_) Validate(datahash []byte) ([]byte, error) {
$$$
}'
Length of output: 9254
Script:
#!/bin/bash
# Check for the new proofs package usage
echo "Checking proofs package usage..."
rg -g '*.go' -B 2 -A 2 'quicksilver-zone/quicksilver/utils/proofs'
# Check for proof interface registrations in codec files
echo "Checking proof interface registrations..."
rg -g '*.go' -B 2 -A 2 'RegisterInterfaces.*proof'
# Check for any new proof types or structs
echo "Checking for proof types..."
ast-grep --pattern 'type $_ struct {
ProofAny *types.Any `protobuf:"bytes,$_,opt,name=proof_any,json=proofAny,proto3" json:"proof_any,omitempty"`
$$$
}'
Length of output: 2332
x/claimsmanager/keeper/keeper.go (1)
53-54
: LGTM! More robust chain ID parsing.
The updated implementation correctly handles chain IDs with multiple segments by accessing the last element of the split result.
Let's verify the chain ID formats in the codebase:
✅ Verification successful
Based on the search results, I can see various chain ID formats used across the codebase, including:
- Test chains:
testchain2
,qstest-1
,lstest-1
,lstest-2
- Production chains:
quicksilver-1
,cosmoshub-4
- Generic formats in test data and configuration files showing hyphenated formats
The updated chain ID parsing logic that uses chainParts[len(chainParts)-1]
correctly handles all these formats by taking the last segment after splitting on hyphens. This is particularly important for both test chains (e.g., qstest-1
) and production chains (e.g., quicksilver-1
, cosmoshub-4
).
Chain ID parsing implementation is robust and correct
The implementation safely handles all chain ID formats used in the codebase by accessing the last segment after splitting on hyphens. This matches the observed chain ID patterns in configurations, test data, and production settings.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash
# Description: Search for chain ID definitions to ensure the parsing logic handles all formats
# Test: Look for chain ID patterns in configuration files and tests
rg -g '!vendor/*' -g '*.{go,json,toml,yaml,yml}' -A 2 'chain[-_]id.*=|chain[-_]id.*:'
Length of output: 48354
utils/proofs/proofs.go (4)
1-16
: LGTM! Well-organized imports.
The imports are cleanly organized and properly versioned, particularly noting the good practice of using the versioned import for Celestia's square package.
18-26
: LGTM! Clean interface definition.
The InclusionProof
interface is well-defined with:
- Appropriate extension of
proto.Message
- Clear validation method signature
- Proper type assertions for implementations
77-81
: Use consistent hash comparison approach.
Similar to the CelestiaProof implementation, consider using direct byte comparison instead of string comparison for better performance.
86-92
: LGTM! Clean interface registration.
The registration function follows standard practice for registering interface implementations.
x/interchainstaking/keeper/hooks.go (1)
32-34
: Verify ICQ-relayer version compatibility.
Given that this PR requires ICQ-relayer v1.1.0+ for proper query response handling, we should ensure that the version check is enforced somewhere in the codebase.
utils/proofs/proofs_test.go (1)
1-14
: LGTM! Package and imports are well-organized.
The package name follows Go conventions for test files, and all imports are necessary and properly used.
go.mod (3)
9-10
: Verify Celestia dependencies integration
The addition of Celestia-related dependencies aligns with the PR objectives for handling Celestia Inclusion proofs:
github.com/celestiaorg/go-square/v2 v2.0.0
github.com/celestiaorg/nmt v0.22.2
Let's verify their usage in the codebase.
✅ Verification successful
The verification results show comprehensive integration of Celestia dependencies in the codebase:
Celestia dependencies are properly integrated and utilized
The dependencies are actively used across multiple components:
- ShareProof implementation in
third-party-chains/celestia-types/types/share_proof.go
- Protocol buffer definitions in
proto/celestia/proof.proto
- Integration in ICQ relayer at
icq-relayer/pkg/types/client.go
- Proof utilities in
utils/proofs/proofs.go
The codebase shows proper implementation of ShareProof functionality with validation, verification, and protocol buffer support, aligning with the PR objectives.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash
# Description: Check usage of Celestia dependencies in the codebase
# Test: Search for imports and usage of Celestia packages
echo "Checking Celestia package imports and usage..."
rg -l "celestiaorg/go-square|celestiaorg/nmt"
# Test: Look for ShareProof type usage mentioned in PR objectives
echo "Checking ShareProof type usage..."
rg "ShareProof"
Length of output: 8441
319-322
: Verify OpenTelemetry version consistency
The OpenTelemetry dependencies have been updated to v1.28.0. Ensure all related packages are on the same version:
go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/metric v1.28.0
go.opentelemetry.io/otel/sdk v1.28.0
go.opentelemetry.io/otel/trace v1.28.0
✅ Verification successful
Let me generate the correct script to check OpenTelemetry versions in go.mod:
OpenTelemetry versions are consistent
All OpenTelemetry dependencies are correctly aligned at version v1.28.0:
go.opentelemetry.io/otel
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/trace
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash
# Description: Verify OpenTelemetry version consistency
# Test: Check for any mismatched OpenTelemetry versions
echo "Checking for OpenTelemetry version mismatches..."
rg "go.opentelemetry.io/otel.*v[0-9]+\.[0-9]+\.[0-9]+" go.mod
Length of output: 1562
Script:
#!/bin/bash
# Description: Verify OpenTelemetry version consistency
# Test: Check for any mismatched OpenTelemetry versions
echo "Checking for OpenTelemetry version mismatches..."
grep "go.opentelemetry.io/otel" go.mod
Length of output: 346
25-26
: Verify Prometheus client compatibility
The Prometheus client and common libraries have been updated:
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/common v0.60.1
These are major version updates that might include breaking changes.
✅ Verification successful
Let me gather more specific information about how these libraries are being used to better assess compatibility concerns.
Let me check one more aspect to see how the metrics are registered and initialized.
Prometheus client usage appears safe with the update
The codebase uses standard Prometheus patterns that are stable across versions:
- Basic metric types (Counter, Gauge, Histogram)
- Standard registration via
prometheus.NewRegistry()
- Common HTTP handler setup with
promhttp.Handler
- Standard text format encoding with
expfmt
The usage follows well-established Prometheus patterns that have remained consistent across versions, and there are no deprecated or changed APIs being used.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash
# Description: Check Prometheus client usage for potential breaking changes
# Test: Search for Prometheus client usage patterns that might be affected
echo "Checking Prometheus client usage..."
rg "prometheus/client_golang|prometheus/common" --type go -A 5
Length of output: 1562
Script:
#!/bin/bash
# Check specific Prometheus metric types and methods being used
echo "Checking Prometheus metric implementations..."
ast-grep --pattern 'prometheus.$_.$_'
# Check prometheus/common usage details
echo -e "\nChecking prometheus/common usage..."
rg "expfmt\." --type go -A 2
# Check for any prometheus HTTP handler setup
echo -e "\nChecking Prometheus HTTP handler setup..."
rg "promhttp\." --type go -A 2
Length of output: 1017
Script:
#!/bin/bash
# Check metric registration and initialization
echo "Checking metric registration patterns..."
rg "prometheus\.New" --type go -A 3
# Check Counter and Histogram vector definitions
echo -e "\nChecking metric vector definitions..."
rg "CounterVec|HistogramVec" --type go -A 2
Length of output: 5370
icq-relayer/pkg/types/client.go (1)
129-131
: 🛠️ Refactor suggestion
Use structured logging instead of fmt.Println
Using fmt.Println
for error logging is inconsistent with the rest of the codebase which uses structured logging via the log2
package.
Apply this diff:
if err != nil {
- fmt.Println("error getting light block", err)
+ return nil, fmt.Errorf("error getting light block: %w", err)
}
Likely invalid or redundant comment.
x/interchainstaking/keeper/callbacks.go (5)
29-29
: LGTM: Import addition aligns with PR objectives.
The new import for proofs package supports the handling of Celestia Inclusion proofs.
Line range hint 447-477
: LGTM: Improved header validation and error handling.
The changes enhance the robustness of header validation:
- Clear error messages for missing consensus state
- Proper type assertions for client and consensus states
- Comprehensive header validation through checkTMStateValidity
520-523
: LGTM: Robust transaction validation implementation.
The implementation correctly validates transaction bytes against the DataHash using the new inclusion proof system.
525-529
: Verify version compatibility requirements.
As per PR objectives, this change requires icq-relayer version 1.1.0 or higher. Ensure that this requirement is documented in the changelog and release notes.
#!/bin/bash
# Check for version documentation
rg -l "1\.1\.0.*icq-relayer" --type md
502-524
: Verify handling of ProofAny unpacking errors.
The implementation correctly handles the breaking change from proof
to proof_any
, but we should ensure proper error handling for Any type unpacking.
✅ Verification successful
Let me analyze the error handling patterns in the codebase for UnpackAny and proof validation to provide a more informed verification.
Based on the search results, I can now generate the final response as we have sufficient context about error handling patterns for UnpackAny across the codebase.
Error handling for ProofAny unpacking is consistent with codebase patterns
The implementation follows the established error handling pattern used consistently throughout the codebase for UnpackAny operations. The error is properly propagated up the call stack, matching the same pattern used in other critical areas like validator consensus key unpacking and IBC packet handling.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash
# Check for potential error handling patterns in other files
rg -A 3 "UnpackAny.*ProofAny"
Length of output: 134
Script:
#!/bin/bash
# Search for UnpackAny usage patterns in the codebase
rg -A 3 "UnpackAny"
# Also search for similar proof validation patterns
rg -A 3 "InclusionProof.*Validate"
Length of output: 2894
Codecov Report — Attention: Patch coverage is
Additional details and impacted files@@ Coverage Diff @@
## main #1733 +/- ##
==========================================
- Coverage 62.12% 61.60% -0.52%
==========================================
Files 194 196 +2
Lines 16810 17012 +202
==========================================
+ Hits 10443 10481 +38
- Misses 5546 5685 +139
- Partials 821 846 +25
Flags with carried forward coverage won't be shown. Click here to find out more.
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 1
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
📒 Files selected for processing (2)
x/interchainstaking/keeper/callbacks.go
(6 hunks)x/interchainstaking/keeper/callbacks_test.go
(5 hunks)
🚧 Files skipped from review as they are similar to previous changes (1)
- x/interchainstaking/keeper/callbacks_test.go
🧰 Additional context used
🪛 GitHub Check: codecov/patch
x/interchainstaking/keeper/callbacks.go
[warning] 516-517: x/interchainstaking/keeper/callbacks.go#L516-L517
Added lines #L516 - L517 were not covered by tests
🔇 Additional comments (3)
x/interchainstaking/keeper/callbacks.go (3)
Line range hint 447-478
: LGTM: Header validation changes look good
The changes to CheckTMHeaderForZone
improve the method signature and error handling while maintaining the core validation logic.
502-505
: LGTM: Improved error handling for unmarshalling
The addition of error logging for unmarshalling failures improves debuggability.
524-526
: LGTM: Proof validation implementation
The implementation correctly validates the proof against the header's DataHash and transaction hash.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 8
🧹 Outside diff range and nitpick comments (24)
app/metrics.go (2)
Line range hint
53-81
: Consider enhancing error handling and timeout configuration.While the current implementation works, there are a few improvements that could make it more robust:
- The HTTP client should have explicit timeout configuration
- The response status code should be checked
- Consider adding retry logic for failed metric submissions
Here's a suggested improvement:
func (app *Quicksilver) ShipMetrics(ctx sdk.Context) { metricsFamilies, err := prometheus.DefaultGatherer.Gather() if err != nil { ctx.Logger().Error("Error gathering metrics", "error", err) + return } buf := &bytes.Buffer{} defer buf.Reset() e := expfmt.NewEncoder(buf, expfmt.NewFormat(expfmt.TypeTextPlain)) for _, mf := range metricsFamilies { if err := e.Encode(mf); err != nil { ctx.Logger().Error("Error encoding metrics", "error", err) return } } nodeID, err := app.InitNodeID() if err != nil { ctx.Logger().Error("Error getting node ID", "error", err) return } c, cancel := context.WithTimeout(ctx.Context(), time.Second*10) defer cancel() req, err := http.NewRequestWithContext(c, "POST", fmt.Sprintf("%s/job/%s/instance/%s", app.metricsURL, ctx.ChainID(), nodeID), bytes.NewReader(buf.Bytes())) if err != nil { ctx.Logger().Error("Error creating metrics request", "error", err) return } req.Header.Set("Content-Type", string(expfmt.NewFormat(expfmt.TypeTextPlain))) - client := &http.Client{} + client := &http.Client{ + Timeout: 10 * time.Second, + } resp, err := client.Do(req) if err != nil { ctx.Logger().Error("Error sending metrics", "error", err) return } defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + ctx.Logger().Error("Non-success status code from metrics endpoint", + "status", resp.StatusCode) + return + } }
Line range hint
1-81
: Consider implementing metrics batching.The current implementation sends metrics immediately. For high-frequency metrics collection, consider implementing batching to reduce network overhead and improve performance.
app/tps_counter.go (1)
114-115
: Consider adding logging for overflow errors. While the overflow check is a good addition, consider logging the error before returning it to help with debugging and monitoring.
if diff > math.MaxInt64 { + tpc.logger.Error("transaction count difference exceeds int64 max value", + "latest", latest, + "previous", previous, + "status", status) return 0, fmt.Errorf("difference exceeds int64 max value") }x/interchainstaking/keeper/abci.go (1)
Line range hint
29-93
: Consider standardizing error handling patterns across BeginBlocker operations.The BeginBlocker implements different error handling patterns:
- Some operations log errors and continue
- The new parameter validation panics
- Comments explicitly mention avoiding panics for certain operations
Consider establishing consistent error handling guidelines:
- Define what constitutes a terminal vs non-terminal error
- Document when to use panic vs error logging
- Consider using metrics to track error frequencies
Example metric implementation:
// Add after error handling blocks telemetry.IncrCounter( 1.0, types.ModuleName, "begin_blocker_errors", "operation", // e.g., "gc_redelegations", "withdrawal_address", etc. )This will help monitor error patterns and identify potential issues before they become critical.
x/interchainstaking/keeper/ibc_channel_handlers.go (1)
63-74
: Good addition of overflow protection, but consider additional improvements. The validation of the deposit interval parameter prevents potential integer overflow when casting to int64. However, consider these additional improvements:
- Add validation for the minimum acceptable value (should be > 0)
- Consider using constants for magic numbers
- Add logging when the validation fails
Here's a suggested improvement:
+ const minDepositInterval = 1 + param := k.GetParam(ctx, types.KeyDepositInterval) if param > math.MaxInt64 { + ctx.Logger().Error("deposit interval parameter exceeds int64 range", "value", param) return fmt.Errorf("deposit interval parameter exceeds int64 range: %d", param) } + if param < minDepositInterval { + ctx.Logger().Error("deposit interval parameter below minimum", "value", param, "min", minDepositInterval) + return fmt.Errorf("deposit interval parameter below minimum: %d (min: %d)", param, minDepositInterval) + }utils/proofs.go (1)
Line range hint
1-108
: Consider updating proof validation for multiple proof types. The PR introduces support for Celestia Inclusion proofs, but this function only handles Tendermint proofs. According to the PR objectives, there's a new `InclusionProof` interface that supports both `TendermintProof` and `CelestiaProof`. This function should be updated to handle both proof types.

Consider:
- Updating the function signature to accept an `InclusionProof` interface
- Adding a type switch to handle different proof implementations
- Moving the Tendermint-specific validation to a separate function
Example approach:
func ValidateProofOps( ctx sdk.Context, ibcKeeper *ibckeeper.Keeper, connectionID string, proof InclusionProof, // ... other params ) error { switch p := proof.(type) { case *TendermintProof: return validateTendermintProof(...) case *CelestiaProof: return validateCelestiaProof(...) default: return fmt.Errorf("unsupported proof type: %T", p) } }x/interchainstaking/types/delegation.go (2)
194-196
: Enhance error message clarity. While the negative value check is good, the error message could be more descriptive to help with debugging.
Consider this improvement:
- if maxAllocation.LT(sdk.ZeroInt()) { - return nil, errors.New("maxCanAllocate underflow") - } + if maxAllocation.LT(sdk.ZeroInt()) { + return nil, fmt.Errorf("validator %s has negative maxCanAllocate value: %s", targetAllocation.ValoperAddress, maxAllocation) + }
198-201
: Document cap breach handling logic. The cap breach handling logic is correct, but adding a comment would improve maintainability. Consider adding this comment before line 198:
Consider adding this comment before line 198:
+ // If adding this allocation would exceed the maximum allowed: + // 1. Set this allocation to remaining capacity (max - current) + // 2. Update total allocation to maximum + // 3. Remove validator from future allocation rounds if delta.Amount.Add(thisAllocation).GTE(maxAllocation) { thisAllocation = maxAllocation.Sub(delta.Amount) delta.Amount = maxAllocation targetAllocations = targetAllocations.Remove(delta.ValoperAddress) }x/interchainstaking/types/rebalance.go (1)
61-66
: LGTM! Consider adding error logging for missing max allocations. The variable renaming from
max
to maxAmount
improves code clarity. The logic for handling maximum allocation limits is correct. Consider adding debug logging when a validator's maximum allocation is not found in the
maxCanAllocate
map, as this might indicate a configuration issue:maxAmount, ok := maxCanAllocate[valoper] if !ok { + if logger != nil { + logger.Debug("No maximum allocation found for validator", "valoper", valoper) + } maxAmount = delta }x/participationrewards/keeper/rewards_validatorSelection.go (4)
85-87
: Consider consistent naming for all voting power related variables. While the renaming of
max
/min
to maxVP
/minVP
improves code clarity, the derived variables maxp
/minp
don't follow the same naming convention. Consider renaming them to maxVPPercentage
/minVPPercentage
for consistency.- maxp := sdk.NewDecFromInt(maxVP).Quo(sdk.NewDecFromInt(zs.TotalVotingPower)) - minp := sdk.NewDecFromInt(minVP).Quo(sdk.NewDecFromInt(zs.TotalVotingPower)) + maxVPPercentage := sdk.NewDecFromInt(maxVP).Quo(sdk.NewDecFromInt(zs.TotalVotingPower)) + minVPPercentage := sdk.NewDecFromInt(minVP).Quo(sdk.NewDecFromInt(zs.TotalVotingPower))Also applies to: 99-106, 119-120
Line range hint
126-132
: Document the distribution score normalization formula. The distribution score calculation uses a complex normalization formula that would benefit from documentation explaining:
- The mathematical reasoning behind the formula
- Why this specific normalization approach was chosen
- The expected range of output values
Add a comment block explaining the formula:
+ // Distribution Score Normalization: + // 1. (powerPercentage - minp) calculates how far the validator's power is from the minimum + // 2. Dividing by maxp normalizes the range + // 3. Subtracting from 1 inverts the score to favor smaller validators + // Result: Validators with lower voting power get higher distribution scores vs.DistributionScore = sdk.NewDec(1).Sub( vs.PowerPercentage.Sub(minp).Mul( sdk.NewDec(1).Quo(maxp), ), )
Line range hint
91-93
: Enhance error message for negative voting power. The error message for negative voting power could be more informative by including the actual value.
- return fmt.Errorf("unexpected negative voting power for %s", val.ValoperAddress) + return fmt.Errorf("unexpected negative voting power %s for validator %s", val.VotingPower, val.ValoperAddress)
Line range hint
238-239
: Optimize slice allocations in CalcUserValidatorSelectionAllocations. Consider preallocating the slices to avoid potential reallocations during append operations.
- userScores := make([]types.UserScore, 0) + // Count intents first to preallocate the slice + intentCount := 0 + k.icsKeeper.IterateDelegatorIntents(ctx, zone, true, func(_ int64, _ icstypes.DelegatorIntent) bool { + intentCount++ + return false + }) + userScores := make([]types.UserScore, 0, intentCount)x/interchainstaking/keeper/proposal_handler.go (2)
116-120
: LGTM: Essential overflow protection added. Good addition of the overflow check before casting to int64. This prevents potential runtime panics from integer overflow.
Consider making the error message more specific by including the maximum allowed value:
- return fmt.Errorf("validator set interval parameter exceeds int64 range: %d", param) + return fmt.Errorf("validator set interval parameter %d exceeds maximum allowed value of %d", param, math.MaxInt64)
Incomplete Celestia Support Changes
The PR does not include necessary modifications for Celestia Inclusion Proofs, such as implementing the
InclusionProof
interface. Please ensure all Celestia-related functionalities are fully integrated.🔗 Analysis chain
Line range hint
1-324
: Verify completeness of changes for Celestia support. While the current changes add important safety checks, given that this PR is part of a release focusing on Celestia Inclusion proofs support, we should verify if any additional changes are needed in this file to fully support Celestia zones.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Search for any Celestia-related configurations or proof handling in the codebase # that might require changes in zone registration or updates # Look for Celestia-related types or configurations rg -l "celestia|CelestiaProof|ShareProof" --type go # Look for proof-related interfaces that might need zone support ast-grep --pattern 'interface InclusionProof { $$$ }'Length of output: 418
x/interchainstaking/keeper/delegation.go (2)
278-280
: LGTM! Consider using a constant for uint32 max value. The overflow check is correctly placed before converting to uint32 for the waitgroup increment.
Consider defining a constant for better maintainability:
+const maxWaitgroupSize = math.MaxUint32 -if len(msgs) > math.MaxUint32 { +if len(msgs) > maxWaitgroupSize {
358-360
: Consider extracting the overflow check into a helper function. This overflow check is duplicated from the previous instance. Consider extracting it into a helper function to improve maintainability and ensure consistent error handling.
Example implementation:
+func validateMessageCount(count int) error { + if count > math.MaxUint32 { + return fmt.Errorf("number of messages exceeds uint32 range: %d", count) + } + return nil +} // In WithdrawDelegationRewardsForResponse and FlushOutstandingDelegations: -if len(msgs) > math.MaxUint32 { - return fmt.Errorf("number of messages exceeds uint32 range: %d", len(msgs)) -} +if err := validateMessageCount(len(msgs)); err != nil { + return err +}x/interchainstaking/keeper/callbacks_test.go (3)
1839-1843
: LGTM: Good defensive programming practice. The addition of pagination total validation prevents potential integer overflow issues. The validation is correctly placed before using the total in comparisons.
Consider using a constant for the error message to maintain consistency across both locations:
+const errPaginationTotalExceedsInt = "pagination total exceeds int range: %d"
Also applies to: 1888-1892
2313-2319
: Consider extracting duplicated proof validation logic. The proof validation logic is duplicated in two locations. Consider extracting it into a helper function to improve maintainability.
+func (k Keeper) validateAndDecodeProof(payload *icqtypes.QueryResponseWithProof) (*tx.Tx, error) { + var proof proofs.InclusionProof + if err := k.GetCodec().UnpackAny(payload.ProofAny, &proof); err != nil { + return nil, err + } + + tmProof, ok := proof.(*proofs.TendermintProof) + if !ok { + return nil, fmt.Errorf("invalid proof type: expected TendermintProof") + } + + sdkTx, err := keeper.TxDecoder(k.GetCodec())(tmProof.TxProof.Data) + if err != nil { + return nil, err + } + + authTx, ok := sdkTx.(*tx.Tx) + if !ok { + return nil, fmt.Errorf("invalid transaction type: expected *tx.Tx") + } + + return authTx, nil +}Also applies to: 2417-2423
2812-2813
: Consider adding documentation for test fixtures. While the test fixtures are properly declared as constants, adding documentation about their contents and purpose would improve maintainability.
+// txFixtureLsm contains a base64 encoded test transaction with LSM (Liquid Staking Module) data const txFixtureLsm = "ItsGCtAECpID..." +// txFixture contains a base64 encoded regular test transaction const txFixture = "ItsGCtAECpID..."x/interchainstaking/keeper/msg_server.go (4)
64-69
: Consider removing the redundant block height check. The block height retrieved from
ctx.BlockHeight()
should always be non-negative in the blockchain context. The check for blockHeight < 0
may be unnecessary as block heights are inherently non-negative.
Line range hint
210-222
: Potential risk of index out of range in GovReopenChannel
In the
GovReopenChannel
function, after splitting portID
, there is no check to ensure that parts
has the expected number of elements before accessing parts[0]
. This could lead to an index out of range error if portID
does not contain a "."
character. Consider adding a check to verify that
parts
has at least one element before accessing parts[0]
.Apply this diff to address the issue:
parts := strings.Split(portID, ".") +if len(parts) < 1 { + return nil, fmt.Errorf("invalid portID format: %s", portID) +}
Line range hint
256-275
: Incorrect event type emitted in GovCloseChannel
In the
GovCloseChannel
function, the event emitted uses types.EventTypeReopenICA
, which is intended for reopening channels. This function closes a channel, so the event type should reflect that action. Apply this diff to correct the event type:
ctx.EventManager().EmitEvents(sdk.Events{ sdk.NewEvent( sdk.EventTypeMessage, sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), ), sdk.NewEvent( - types.EventTypeReopenICA, + types.EventTypeCloseICA, sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId), sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId), ), })
Line range hint
386-393
: Method naming inconsistency in GovRemoveValidatorDenyList
The method
GetDeniedValidatorInDenyList
returns a boolean indicating the presence of a validator in the deny list. For better clarity and adherence to Go naming conventions, consider renaming the method to IsValidatorInDenyList
or ValidatorExistsInDenyList
.
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
⛔ Files ignored due to path filters (1)
go.work.sum
is excluded by!**/*.sum
📒 Files selected for processing (38)
app/metrics.go
(2 hunks)app/tps_counter.go
(2 hunks)app/tps_counter_test.go
(1 hunks)test/simulation/simtypes/rand.go
(3 hunks)utils/proofs.go
(1 hunks)utils/proofs/proofs_test.go
(1 hunks)x/airdrop/keeper/msg_server_test.go
(0 hunks)x/claimsmanager/keeper/keeper.go
(1 hunks)x/epochs/types/genesis_test.go
(0 hunks)x/interchainquery/keeper/msg_server.go
(1 hunks)x/interchainstaking/client/cli/cli_test.go
(0 hunks)x/interchainstaking/genesis.go
(0 hunks)x/interchainstaking/keeper/abci.go
(2 hunks)x/interchainstaking/keeper/callbacks.go
(8 hunks)x/interchainstaking/keeper/callbacks_test.go
(7 hunks)x/interchainstaking/keeper/delegation.go
(3 hunks)x/interchainstaking/keeper/delegation_test.go
(0 hunks)x/interchainstaking/keeper/ibc_channel_handlers.go
(2 hunks)x/interchainstaking/keeper/ibc_packet_handlers.go
(5 hunks)x/interchainstaking/keeper/ibc_packet_handlers_test.go
(0 hunks)x/interchainstaking/keeper/intent_test.go
(0 hunks)x/interchainstaking/keeper/lsm.go
(1 hunks)x/interchainstaking/keeper/msg_server.go
(1 hunks)x/interchainstaking/keeper/msg_server_test.go
(0 hunks)x/interchainstaking/keeper/proposal_handler.go
(2 hunks)x/interchainstaking/types/accounts_test.go
(0 hunks)x/interchainstaking/types/delegation.go
(2 hunks)x/interchainstaking/types/delegation_test.go
(0 hunks)x/interchainstaking/types/keys.go
(1 hunks)x/interchainstaking/types/rebalance.go
(1 hunks)x/mint/keeper/keeper.go
(2 hunks)x/mint/simulation/decoder_test.go
(0 hunks)x/participationrewards/keeper/distribution_test.go
(0 hunks)x/participationrewards/keeper/msg_server_test.go
(0 hunks)x/participationrewards/keeper/rewards_holdings_test.go
(0 hunks)x/participationrewards/keeper/rewards_validatorSelection.go
(3 hunks)x/participationrewards/keeper/rewards_validatorSelection_test.go
(0 hunks)x/participationrewards/types/keys.go
(2 hunks)
🔥 Files not summarized due to errors (1)
- x/interchainstaking/keeper/delegation_test.go: Error: Server error: no LLM provider could handle the message
💤 Files with no reviewable changes (15)
- x/airdrop/keeper/msg_server_test.go
- x/epochs/types/genesis_test.go
- x/interchainstaking/client/cli/cli_test.go
- x/interchainstaking/genesis.go
- x/interchainstaking/keeper/delegation_test.go
- x/interchainstaking/keeper/ibc_packet_handlers_test.go
- x/interchainstaking/keeper/intent_test.go
- x/interchainstaking/keeper/msg_server_test.go
- x/interchainstaking/types/accounts_test.go
- x/interchainstaking/types/delegation_test.go
- x/mint/simulation/decoder_test.go
- x/participationrewards/keeper/distribution_test.go
- x/participationrewards/keeper/msg_server_test.go
- x/participationrewards/keeper/rewards_holdings_test.go
- x/participationrewards/keeper/rewards_validatorSelection_test.go
✅ Files skipped from review due to trivial changes (1)
- app/tps_counter_test.go
🚧 Files skipped from review as they are similar to previous changes (2)
- utils/proofs/proofs_test.go
- x/claimsmanager/keeper/keeper.go
🧰 Additional context used
🪛 GitHub Check: lint
x/participationrewards/types/keys.go
[failure] 33-33:
G115: integer overflow conversion int32 -> uint64 (gosec)
🔇 Additional comments (31)
x/participationrewards/types/keys.go (3)
4-5
: LGTM: Import changes are appropriate
The addition of the "fmt" import is necessary for the new error message formatting.
30-32
:
Address potential integer overflow in type conversion
While the negative value check is good, there's a potential integer overflow when converting from ProtocolDataType
(presumably int32) to uint64. This could lead to unexpected behavior if large negative values are provided.
Consider this safer implementation:
if pdType < 0 {
panic(fmt.Sprintf("protocol data type is negative: %d", pdType))
}
-return append(sdk.Uint64ToBigEndian(uint64(pdType)), key...)
+// Ensure safe conversion
+positiveValue := uint64(pdType)
+if positiveValue > math.MaxUint64 {
+ panic(fmt.Sprintf("protocol data type exceeds maximum allowed value: %d", pdType))
+}
+return append(sdk.Uint64ToBigEndian(positiveValue), key...)
Likely invalid or redundant comment.
29-40
: Verify impact on existing clients and update documentation
These new validation checks are breaking changes that could affect existing clients. Please ensure that:
- The changes are documented in the module's documentation
- Migration guides are updated if necessary
- Clients are not relying on the previous behavior of accepting negative protocol data types
🧰 Tools
🪛 GitHub Check: lint
[failure] 33-33:
G115: integer overflow conversion int32 -> uint64 (gosec)
[failure] 40-40:
G115: integer overflow conversion int32 -> uint64 (gosec)
app/metrics.go (1)
53-53
: LGTM! More explicit format type handling.
The changes make the format type handling more explicit and consistent between the encoder initialization and content type header.
Also applies to: 73-73
app/tps_counter.go (2)
5-6
: LGTM: Required imports added
The new imports are necessary for the enhanced error handling implementation.
107-118
:
Review error handling strategy in start method
The start
method currently panics when recordValue
returns an error. Consider handling these errors gracefully instead of causing service disruption.
- nSuccess, err := tpc.recordValue(ctx, latestNSuccessful, lastNSuccessful, statusSuccess)
- if err == nil {
- nTxn += nSuccess
- } else {
- panic(err)
- }
+ nSuccess, err := tpc.recordValue(ctx, latestNSuccessful, lastNSuccessful, statusSuccess)
+ if err != nil {
+ tpc.logger.Error("Failed to record successful transactions", "error", err)
+ continue
+ }
+ nTxn += nSuccess
Likely invalid or redundant comment.
x/interchainstaking/keeper/abci.go (1)
5-6
: LGTM: Required imports added for parameter validation.
The new imports fmt
and math
are necessary for the parameter validation logic.
x/interchainstaking/keeper/ibc_channel_handlers.go (2)
5-5
: LGTM: Import addition is appropriate
The addition of the math
package import is necessary for the new parameter validation logic.
63-74
: Verify testing coverage for parameter validation
Since this change affects the deposit address initialization flow, which is critical for the interchainstaking module, ensure that:
- The parameter validation is covered by unit tests
- Integration tests verify the behavior with various parameter values
- The change is documented in the upgrade guide if it affects existing deployments
x/interchainstaking/keeper/lsm.go (2)
Line range hint 1-156
: Overall code quality is excellent.
The file demonstrates:
- Clear separation of concerns between different cap validation methods
- Consistent error handling patterns
- Proper use of cosmos-sdk types and math operations
- Good documentation and naming conventions
42-42
:
Parameter rename improves clarity but introduces a breaking change.
The parameter rename from cap
to lsmCaps
in the callback function signature improves code clarity by using a more descriptive name. However, this is a breaking change that could affect existing code that uses this method.
Let's verify the impact of this breaking change:
✅ Verification successful
Parameter rename from cap
to lsmCaps
does not introduce breaking changes.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash
# Description: Search for any direct usage of IterateLsmCaps in the codebase
# to identify code that needs to be updated.
# Search for direct usage of IterateLsmCaps
rg -A 3 "IterateLsmCaps\(" --type go
# Search for any interface definitions that might include this method
ast-grep --pattern 'type $_ interface {
$$$
IterateLsmCaps($$$)
$$$
}'
Length of output: 774
test/simulation/simtypes/rand.go (4)
48-55
: LGTM! Parameter renaming improves clarity.
The renaming from max
to maxPositiveInt
better describes the parameter's purpose and constraints. The function's implementation remains robust with proper validation and edge case handling.
60-60
: LGTM! Parameter renaming maintains semantic clarity.
The renaming from max
to maxAmount
better reflects the parameter's purpose. The intentional bias towards returning 0 and max values (10% chance each) is well-documented and preserved.
Also applies to: 68-68, 70-70
78-78
: LGTM! Parameter renaming aligns with decimal type.
The renaming from max
to maxDec
better indicates that this parameter is a sdk.Dec
type. The implementation maintains proper decimal precision handling and consistent bias distribution.
Also applies to: 85-85, 87-87
101-102
: LGTM! Parameter renaming improves type clarity.
The renaming from min
/max
to minInt
/maxInt
better indicates that these parameters are integers. The implementation correctly maintains inclusive range behavior.
x/mint/keeper/keeper.go (2)
4-5
: LGTM: Import required for int64 bounds checking.
The addition of the math
package import is appropriate for accessing the MaxInt64
constant used in the new validation.
90-94
: Verify existing data and document breaking change.
Since this validation could potentially break existing chains if any stored epoch numbers exceed MaxInt64, we should:
- Verify there are no existing values that would fail this validation
- Document this breaking change in the upgrade handler
x/interchainstaking/types/delegation.go (2)
181-181
: LGTM! Variable rename improves clarity.
The rename from max
to maxAllocation
makes the variable's purpose more explicit and improves code readability.
Line range hint 235-238
: Verify commented dust handling code.
There's commented-out code for dust handling at the end of the function. Please verify if this code should be removed or implemented.
Let's check if dust handling is implemented elsewhere or if there are related issues:
x/interchainstaking/keeper/proposal_handler.go (1)
6-6
: LGTM: Import addition is appropriate
The addition of the "math" package import is necessary for the new validation logic using math.MaxInt64
.
x/interchainstaking/keeper/callbacks.go (4)
420-422
: LGTM: Good security practice
Added integer overflow protection for trusted height revision, preventing potential security issues.
452-452
: LGTM: Improved error handling
The error message now includes the trusted height string, making debugging easier.
Also applies to: 463-465
529-531
: LGTM: Proper proof validation
The implementation correctly validates the proof against the header's DataHash and returns the transaction bytes, aligning with the new InclusionProof
interface requirements.
507-511
: Breaking Change: New proof handling implementation
This implements the breaking change mentioned in the PR objectives where proof
is replaced with proof_any
of type Any
. The new implementation uses the InclusionProof
interface for validating proofs.
Also applies to: 514-515
✅ Verification successful
Breaking Change: New proof handling implementation verified
No remaining uses of the old proof
field found in x/interchainstaking/keeper/callbacks.go
.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash
# Check for any remaining uses of the old proof field
rg -A 3 'proof\s+[^_]'
Length of output: 23092
x/interchainstaking/keeper/callbacks_test.go (3)
41-41
: LGTM: Import addition aligns with new proof handling
The addition of the proofs package import is consistent with the PR's objective to support Celestia Inclusion proofs.
2231-2231
: LGTM: Header validation update
The update to CheckTMHeaderForZone
to accept a header pointer is consistent with the changes in the main implementation.
2313-2319
: Verify compatibility with icq-relayer v1.1.0+
As mentioned in the PR objectives, these changes require icq-relayer v1.1.0 or higher. Let's verify the dependency constraints.
x/interchainstaking/keeper/msg_server.go (1)
Line range hint 298-319
: Missing authority check in GovSetLsmCaps
In the GovSetLsmCaps
function, while the authority of the message sender is validated, there is no verification of the values within msg.Caps
. Invalid or extreme values could adversely affect the system.
Consider adding validation for msg.Caps
to ensure the caps are within acceptable ranges.
x/interchainstaking/keeper/ibc_packet_handlers.go (3)
6-6
: Approved: Proper aliasing of the standard math
package
The standard math
package is correctly aliased as stdmath
to avoid conflicts with the imported cosmossdk.io/math
package.
509-512
: Approved: Added overflow check before casting to int64
Adding the overflow check for param
before casting to int64
prevents potential overflow issues when the parameter exceeds the int64
range.
1352-1355
: Duplicate Comment: Added overflow check before casting to int64
The overflow check added here is identical to the one in lines 509-512 and effectively prevents potential overflows.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 2
🧹 Outside diff range and nitpick comments (7)
x/airdrop/keeper/claim_handler.go (2)
158-160
: Improve error messages for amount verification failures. The error messages for insufficient amounts could be more descriptive to help with debugging. Consider enhancing the error messages:
Consider enhancing the error messages:
-return fmt.Errorf("insufficient deposit amount, expects %v got %v", tAmount, gdAmount) +return fmt.Errorf("insufficient deposit amount for address %s: required %v %s, got %v %s", + cr.Address, tAmount, zone.BaseDenom, gdAmount, zone.BaseDenom) -return fmt.Errorf("insufficient liquidity, expects at least %s, got %s", tAmount, uAmount) +return fmt.Errorf("insufficient Osmosis LP liquidity for address %s: required %s, got %s", + cr.Address, tAmount, uAmount)Also applies to: 300-302
Incomplete Implementation of
ActionGbP
Confirmed. The
ActionGbP
action is defined and referenced but lacks an implementation in the handler, which can lead to issues if accessed.
x/airdrop/keeper/claim_handler.go
handles types.ActionGbP
without an implementation.🔗 Analysis chain
Line range hint
52-53
: Incomplete implementation for ActionGbP
Given that this is a release version (v1.7.x), having an unimplemented action handler could be problematic if the action is accessible to users. Consider either:
- Implementing the handler before the release
- Ensuring the action is not accessible until implemented
- Adding proper error handling for this unimplemented case
Let's verify if this action is accessible to users:
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Search for any references to ActionGbP in the codebase rg "ActionGbP" --type go # Search for any proto definitions or constants related to this action ast-grep --pattern 'const ActionGbP = $_'Length of output: 283
x/interchainstaking/keeper/redemptions.go (1)
257-259
: Consider improving error handling and function structure. While reviewing the waitgroup change, I noticed some opportunities for improvement in the surrounding code:
- The error handling could be more granular
- The function is quite long and handles multiple responsibilities
Consider breaking down the function into smaller, more focused functions:
func (k *Keeper) HandleQueuedUnbondings(ctx sdk.Context, zone *types.Zone, epoch int64) error { + // Extract to separate functions + withdrawals, err := k.collectQueuedWithdrawals(ctx, zone) + if err != nil { + return fmt.Errorf("failed to collect withdrawals: %w", err) + } + + msgs, err := k.prepareUnbondingMessages(ctx, zone, withdrawals) + if err != nil { + return fmt.Errorf("failed to prepare messages: %w", err) + } + + if err := k.submitUnbondingTransaction(ctx, zone, msgs, epoch); err != nil { + return fmt.Errorf("failed to submit transaction: %w", err) + }x/interchainstaking/keeper/receipt.go (3)
Line range hint
32-165
: Consider refactoring HandleReceiptTransaction for better maintainability. The function is quite long and handles multiple responsibilities (memo processing, validation, minting, transfers). Consider breaking it down into smaller, more focused functions.
Suggested refactoring:
- Extract memo processing into a separate function:
func (k *Keeper) processMemo(memo string, zone *types.Zone, assets sdk.Coins) (types.MemoFields, error) { if len(memo) == 0 { return types.MemoFields{}, nil } return zone.DecodeMemo(memo) }
- Extract asset validation into a separate function:
func (k *Keeper) validateAssets(ctx sdk.Context, zone *types.Zone, assets sdk.Coins) (bool, bool) { return zone.ValidateCoinsForZone(assets, k.GetValidatorAddressesAsMap(ctx, zone.ChainId)) }This would improve readability and make the code easier to test and maintain.
Line range hint
209-279
: Enhance error handling in MintAndSendQAsset. The function has good logging but could benefit from more structured error handling and validation.
Consider these improvements:
- Add validation for empty assets:
+ if assets.IsZero() { + return errors.New("empty assets") + }
- Use more specific error types:
- return errors.New("zero redemption rate") + return fmt.Errorf("zero redemption rate for zone %s", zone.ChainId)
- Consider adding metrics for different transfer scenarios:
if zone.ReturnToSender || memoRTS { metrics.IncCounter("qasset_transfers_rts") } else if mappedAddress != nil { metrics.IncCounter("qasset_transfers_mapped") } else { metrics.IncCounter("qasset_transfers_direct") }
Line range hint
301-338
: Add retry mechanism for ICA transactions. The ProdSubmitTx function handles message chunking well but lacks retry logic for failed transactions.
Consider implementing a retry mechanism with exponential backoff for failed ICA transactions. This would improve reliability, especially during network congestion or temporary outages.
Example approach:
func (k *Keeper) submitWithRetry(ctx sdk.Context, connectionID, portID string, packetData icatypes.InterchainAccountPacketData, timeoutTimestamp uint64) error { backoff := time.Second maxRetries := 3 for i := 0; i < maxRetries; i++ { _, err := k.ICAControllerKeeper.SendTx(ctx, nil, connectionID, portID, packetData, timeoutTimestamp) if err == nil { return nil } if i < maxRetries-1 { time.Sleep(backoff) backoff *= 2 } } return fmt.Errorf("failed after %d retries", maxRetries) }x/interchainstaking/keeper/callbacks_test.go (1)
2812-2813
: Consider documenting test fixtures. Consider adding comments to explain what these fixtures represent and how they differ (e.g., what scenarios they test).
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
📒 Files selected for processing (17)
x/airdrop/keeper/claim_handler.go
(2 hunks)x/airdrop/keeper/claim_record.go
(3 hunks)x/airdrop/keeper/msg_server.go
(1 hunks)x/airdrop/types/airdrop_test.go
(2 hunks)x/claimsmanager/types/keys.go
(1 hunks)x/interchainstaking/keeper/callbacks.go
(8 hunks)x/interchainstaking/keeper/callbacks_test.go
(7 hunks)x/interchainstaking/keeper/delegation.go
(3 hunks)x/interchainstaking/keeper/proposal_handler.go
(3 hunks)x/interchainstaking/keeper/receipt.go
(2 hunks)x/interchainstaking/keeper/redemptions.go
(1 hunks)x/interchainstaking/keeper/withdrawal_record.go
(1 hunks)x/interchainstaking/types/keys.go
(2 hunks)x/mint/keeper/keeper.go
(2 hunks)x/participationrewards/keeper/callbacks.go
(1 hunks)x/participationrewards/keeper/submodule_osmosis.go
(1 hunks)x/participationrewards/types/keys.go
(2 hunks)
✅ Files skipped from review due to trivial changes (6)
- x/airdrop/keeper/claim_record.go
- x/airdrop/keeper/msg_server.go
- x/claimsmanager/types/keys.go
- x/interchainstaking/keeper/withdrawal_record.go
- x/participationrewards/keeper/callbacks.go
- x/participationrewards/keeper/submodule_osmosis.go
🚧 Files skipped from review as they are similar to previous changes (6)
- x/interchainstaking/keeper/callbacks.go
- x/interchainstaking/keeper/delegation.go
- x/interchainstaking/keeper/proposal_handler.go
- x/interchainstaking/types/keys.go
- x/mint/keeper/keeper.go
- x/participationrewards/types/keys.go
🔇 Additional comments (8)
x/airdrop/keeper/claim_handler.go (1)
157-157
:
Security: Potential integer overflow in amount calculations
The gosec linter warnings are being suppressed for two critical calculations:
threshold.MulInt64(int64(cr.BaseValue))
inverifyDeposit
dThreshold.MulInt64(int64(cr.BaseValue / 2))
inverifyOsmosisLP
These calculations involve unsafe type conversions from uint64
to int64
which could lead to integer overflow. This is particularly concerning as these values are used to determine airdrop amounts.
Consider implementing safe conversion checks:
-tAmount := threshold.MulInt64(int64(cr.BaseValue)).TruncateInt() //nolint:gosec
+baseValue := cr.BaseValue
+if baseValue > uint64(math.MaxInt64) {
+ return fmt.Errorf("base value %d exceeds maximum safe integer value", baseValue)
+}
+tAmount := threshold.MulInt64(int64(baseValue)).TruncateInt()
Similar safety check should be added for the calculation in verifyOsmosisLP
.
Let's verify the potential impact:
Also applies to: 299-299
x/interchainstaking/keeper/redemptions.go (1)
257-257
: Verify the security implications of suppressing gosec linter.
The addition of //nolint:gosec
suppresses a security linter warning. While this might be intentional, we should verify that:
- The suppression is necessary and documented
- The concurrent operation handling is thread-safe
Let's analyze the usage of this waitgroup across the codebase:
x/interchainstaking/keeper/callbacks_test.go (6)
41-41
: LGTM: Import aligns with new proof handling system
The addition of the proofs package import is consistent with the PR's objective to implement the new InclusionProof
interface.
1839-1843
: LGTM: Good security practice for pagination handling
The addition of the pagination overflow check is a crucial security enhancement that prevents potential integer overflow vulnerabilities.
1888-1892
: Duplicate pagination overflow check
This is a duplicate of the previous pagination overflow check implementation.
2231-2231
: LGTM: Added zone header validation
The addition of CheckTMHeaderForZone
validation is a good practice for ensuring header integrity.
2417-2423
: Duplicate proof handling implementation
This is a duplicate of the previous proof handling implementation.
2313-2319
: Verify proof handling matches PR requirements
The implementation correctly uses the new ProofAny
field and properly handles type assertions for TendermintProof
. This aligns with the PR's breaking change requirements.
* add periodic polling icq * revert redundant callback
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 0
🧹 Outside diff range and nitpick comments (2)
x/interchainstaking/keeper/zones.go (2)
306-316
: LGTM! Consider refactoring to reduce duplication. The new case for handling delegation address is implemented correctly, following the same pattern as other cases. However, there's significant code duplication across all cases that could be refactored into a helper function.
Consider extracting the common logic into a helper function:
+func (k *Keeper) updateAccountBalance(ctx sdk.Context, account *types.ICAAccount, coin sdk.Coin, accountType string) error { + existing := account.Balance.AmountOf(coin.Denom) + err := account.SetBalance(account.Balance.Sub(sdk.NewCoins(sdk.NewCoin(coin.Denom, existing))...).Add(coin)) + if err != nil { + return err + } + err = account.DecrementBalanceWaitgroup() + if err != nil { + return err + } + k.Logger(ctx).Info(fmt.Sprintf("Matched %s address", accountType), "address", account.Address, "wg", account.BalanceWaitgroup, "balance", account.Balance) + return nil +}Then use it in the switch cases:
case zone.DelegationAddress != nil && address == zone.DelegationAddress.Address: - existing := zone.DelegationAddress.Balance.AmountOf(coin.Denom) - err = zone.DelegationAddress.SetBalance(zone.DelegationAddress.Balance.Sub(sdk.NewCoins(sdk.NewCoin(coin.Denom, existing))...).Add(coin)) - if err != nil { - return err - } - err = zone.DelegationAddress.DecrementBalanceWaitgroup() - if err != nil { - return err - } - k.Logger(ctx).Info("Matched delegation address", "address", address, "wg", zone.DelegationAddress.BalanceWaitgroup, "balance", zone.DelegationAddress.Balance) + err = k.updateAccountBalance(ctx, zone.DelegationAddress, coin, "delegation") + if err != nil { + return err + }
The switch statement can be safely removed
Based on the code context, the switch statement appears to be legacy code that's no longer necessary. The comments clearly indicate:
- "? is this switch statement still required ?"
- "prior to callback we had no way to distinguish the originator"
- "with the query type in setAccountCb this is probably superfluous..."
The switch statement is only distinguishing between deposit and withdrawal addresses, which can now be handled through the query type in the callback system, making this branching logic redundant.
- Consider removing the switch statement in
SetAccountBalanceForDenom
and refactor to use the query type callback mechanism instead- The current implementation in
x/interchainstaking/keeper/zones.go
contains technical debt that should be cleaned up🔗 Analysis chain
Line range hint
282-282
: Verify if the switch statement is still necessary.The comment "? is this switch statement still required ?" suggests uncertainty about the implementation. Consider investigating if the query type in
setAccountCb
could provide a more elegant solution.🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash # Description: Search for setAccountCb usage and query type handling # Look for setAccountCb implementation ast-grep --pattern 'func $_(ctx $_, $_, $_, queryType $_) { $$$ }' # Search for query type definitions and usage rg -A 5 'setAccountCb.*queryType'Length of output: 106
Script:
#!/bin/bash # Let's try a broader search to understand the context better # Search for setAccountCb function definition and usage rg -A 10 "setAccountCb" # Search for the switch statement and surrounding context rg -B 5 -A 15 "switch.*AccountType" # Look for any query type related code rg -A 5 "queryType"Length of output: 6638
Script:
#!/bin/bash # Let's look for the complete context around the switch statement and setAccountCb # Get more context around the comment and switch statement rg -B 10 -A 20 "with the query type in setAccountCb" # Look for any other references to AccountType in the same file rg -l "AccountType" x/interchainstaking/keeper/zones.go # Search for the complete function containing this switch ast-grep --pattern 'func $_(ctx $_, zone $_, address $_, coin $_) { $$$ }'Length of output: 2742
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
📒 Files selected for processing (3)
x/interchainstaking/keeper/abci.go
(3 hunks)x/interchainstaking/keeper/callbacks_test.go
(8 hunks)x/interchainstaking/keeper/zones.go
(1 hunks)
🚧 Files skipped from review as they are similar to previous changes (2)
- x/interchainstaking/keeper/abci.go
- x/interchainstaking/keeper/callbacks_test.go
🔇 Additional comments (1)
x/interchainstaking/keeper/zones.go (1)
306-316
: Clarify the relationship with Celestia Inclusion proofs.
The PR's main objective is to address Celestia Inclusion proofs, but these changes appear to be unrelated. Could you clarify how these account balance handling changes relate to the PR's main objective?
…ledgement; make RR logic ignore unbonding and escrowed tokens, add v1.7.0 upgrade handler (#1742)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 0
🧹 Outside diff range and nitpick comments (14)
app/upgrades/upgrades.go (1)
Line range hint
1-28
: Document breaking changes in upgrade guide. Since this upgrade introduces breaking changes around Celestia Inclusion proofs and requires icq-relayer v1.1.0+, consider adding clear documentation about:
- The deprecation of
proof
field in favor ofproof_any
- The minimum required icq-relayer version
- The migration steps for existing implementations
Would you like me to help draft the upgrade documentation?
x/interchainstaking/keeper/abci.go (1)
35-35
: Remove commented code instead of leaving it as a comment. Since this functionality is being replaced by
HandleMaturedWithdrawals
, the commented code should be removed rather than left in place. This improves code maintainability by reducing clutter.-//k.HandleMaturedUnbondings(ctx)
🧰 Tools
🪛 GitHub Check: lint
[failure] 35-35:
commentFormatting: put a space between//
and comment text (gocritic)proto/quicksilver/interchainstaking/v1/interchainstaking.proto (1)
142-145
: Consider documenting timestamp handling requirements.Since completion times are crucial for tracking unbonding operations, consider:
- Documenting the expected timezone (UTC recommended)
- Adding validation rules for completion times
- Establishing clear policies for timestamp updates
x/interchainstaking/keeper/keeper_test.go (3)
Line range hint
516-517
: Consider extracting magic numbers into named constants.The test uses magic numbers (e.g., 30, 500, 166, 167) for redemption rate calculations. Consider extracting these into named constants to improve readability and maintainability.
+const ( + smallRewardAmount = 30 + largeRewardAmount = 500 + delegationIncrementA = 166 + delegationIncrementB = 167 +)Also applies to: 520-521, 524-525
Line range hint
849-1007
: Consider refactoring duplicate test structures.The
TestGetClaimedPercentage
andTestGetClaimedPercentageByClaimType
share similar test structures and setup code. Consider extracting common test setup and helper functions to reduce code duplication.+func (suite *KeeperTestSuite) setupClaimTestCase(claims []claimsmanagertypes.Claim, totalSupply math.Int) (*app.Quicksilver, sdk.Context, *icstypes.Zone) { + suite.SetupTest() + suite.setupTestZones() + + quicksilver := suite.GetQuicksilverApp(suite.chainA) + ctx := suite.chainA.GetContext() + zone, found := quicksilver.InterchainstakingKeeper.GetZone(ctx, suite.chainB.ChainID) + suite.True(found) + + for _, record := range claims { + quicksilver.InterchainstakingKeeper.ClaimsManagerKeeper.SetClaim(ctx, &record) + } + + err := quicksilver.MintKeeper.MintCoins(ctx, sdk.NewCoins(sdk.NewCoin(zone.LocalDenom, totalSupply))) + suite.NoError(err) + + return quicksilver, ctx, &zone +}
Line range hint
127-129
: Remove commented debug code.The commented debug code should be removed as it's not providing any value and could confuse future maintainers.
- // channel, found := quicksilver.IBCKeeper.ChannelKeeper.GetChannel(ctx, portID, channelID) - // suite.True(found) - // fmt.Printf("DEBUG: channel >>>\n%v\n<<<\n", channel)x/interchainstaking/keeper/ibc_packet_handlers.go (1)
1370-1374
: Extract duplicate parameter validation into a helper function.The parameter validation logic is duplicated. Consider extracting it into a helper function to improve maintainability and reduce code duplication.
+func (k *Keeper) validateValidatorSetInterval(param uint64) error { + if param > stdmath.MaxInt64 { + return fmt.Errorf("validator set interval parameter exceeds int64 range: %d", param) + } + return nil +} -param := k.GetParam(ctx, types.KeyValidatorSetInterval) -if param > stdmath.MaxInt64 { - return fmt.Errorf("validator set interval parameter exceeds int64 range: %d", param) -} +param := k.GetParam(ctx, types.KeyValidatorSetInterval) +if err := k.validateValidatorSetInterval(param); err != nil { + return err +}x/interchainstaking/keeper/ibc_packet_handlers_test.go (3)
Line range hint
1-3747
: Consider enhancing test helper functions for better coverage and error handling.The test helper functions could be improved in the following ways:
makeAckForMsgs
could be extended to handle more message types beyond just MsgUndelegate- Error handling in
makePacketFromMsgs
could be more robust with validation of input parametersConsider applying these changes:
func makeAckForMsgs(ctx sdk.Context, cdc codec.Codec, msgs []sdk.Msg, success bool) (channeltypes.Acknowledgement, error) { if !success { return channeltypes.NewErrorAcknowledgement(fmt.Errorf("an error happened")), nil } msgData := sdk.TxMsgData{} for _, msg := range msgs { actualMsg := codectypes.UnsafePackAny(msg) + // Add support for other message types switch actualMsg.TypeUrl { case "/cosmos.staking.v1beta1.MsgUndelegate": t := ctx.BlockTime().Add(time.Hour * 6) resp := stakingtypes.MsgUndelegateResponse{CompletionTime: t} respAny, err := codectypes.NewAnyWithValue(&resp) if err != nil { return channeltypes.Acknowledgement{}, err } msgData.MsgResponses = append(msgData.MsgResponses, respAny) + case "/cosmos.staking.v1beta1.MsgDelegate": + resp := stakingtypes.MsgDelegateResponse{} + respAny, err := codectypes.NewAnyWithValue(&resp) + if err != nil { + return channeltypes.Acknowledgement{}, err + } + msgData.MsgResponses = append(msgData.MsgResponses, respAny) + // Add more cases as needed } }
3747-3747
: Fix indentation for better code readability.The line has incorrect indentation which affects code readability.
err := quicksilver.InterchainstakingKeeper.HandleMaturedWithdrawals(ctx, &zone) + err := quicksilver.InterchainstakingKeeper.HandleMaturedWithdrawals(ctx, &zone)
Line range hint
4000-4100
: Consider adding more edge cases to failure handling tests.The TestHandleFailedDelegate test cases could be enhanced with additional scenarios:
- Test behavior with invalid validator addresses
- Test with malformed memo strings
- Test with zero amount delegations
Example test case to add:
{ name: "zero amount delegation", setupConnection: true, message: func(zone *types.Zone) sdk.Msg { return &stakingtypes.MsgDelegate{ DelegatorAddress: zone.DelegationAddress.Address, ValidatorAddress: vals[0], Amount: sdk.NewCoin("uatom", sdk.NewInt(0)), } }, memo: "batch/12345678", err: true, check: false, },docs/swagger.yml (2)
Line range hint
1-5000
: Consider adding breaking changes documentationWhile the API documentation is comprehensive, given this is a breaking change release (v1.7.x), consider adding explicit documentation about:
- Breaking changes in the API
- Version compatibility requirements
- Migration guidelines for API consumers
This would help API consumers understand and prepare for the changes, especially regarding the new Celestia Inclusion proofs handling.
Line range hint
1-5000
: Document new Celestia Inclusion proof handlingThe PR objectives mention changes to handle Celestia Inclusion proofs, specifically:
- Deprecation of the
proof
field- Addition of
proof_any
field of typeAny
- New
InclusionProof
interface supportHowever, these changes are not reflected in the API documentation. Please update the relevant endpoint schemas to include these changes.
app/upgrades/v1_7.go (1)
24-25
: Consider externalizing hardcoded hash values for maintainability. Hardcoding hash values like
"0c8269f04109a55a152d3cdfd22937b4e5c2746111d579935eef4cd7ffa71f7f"
directly in the code may reduce maintainability and flexibility. Consider externalizing these values to a configuration file or constants, especially if you anticipate adding or modifying hashes in the future.🧰 Tools
🪛 GitHub Check: devskim
[failure] 24-24: A token or key was found in source code. If this represents a secret, it should be moved somewhere else.
Do not store tokens or keys in source code.x/interchainstaking/keeper/keeper.go (1)
728-730
: Remove obsolete commented-out code for clarity. The code at lines 728-730 is commented out and appears to be no longer needed after removing unbonding tokens from the redemption rate logic. Removing unused code improves readability and maintainability.
Apply this diff to remove the commented-out code:
- // nativeAssetUnbonding, _ := k.GetWithdrawnTokensAndCount(ctx, zone) - // nativeAssetUnbondingAmount := nativeAssetUnbonding.Amount - // nativeAssetUnbonded := zone.DelegationAddress.Balance.AmountOf(zone.BaseDenom)
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
⛔ Files ignored due to path filters (1)
x/interchainstaking/types/interchainstaking.pb.go
is excluded by!**/*.pb.go
📒 Files selected for processing (12)
app/upgrades/types.go
(1 hunks)app/upgrades/upgrades.go
(1 hunks)app/upgrades/v1_7.go
(1 hunks)docs/swagger.yml
(1 hunks)proto/quicksilver/interchainstaking/v1/interchainstaking.proto
(1 hunks)x/interchainstaking/keeper/abci.go
(4 hunks)x/interchainstaking/keeper/hooks.go
(2 hunks)x/interchainstaking/keeper/ibc_packet_handlers.go
(8 hunks)x/interchainstaking/keeper/ibc_packet_handlers_test.go
(1 hunks)x/interchainstaking/keeper/keeper.go
(2 hunks)x/interchainstaking/keeper/keeper_test.go
(1 hunks)x/interchainstaking/keeper/zones.go
(3 hunks)
🚧 Files skipped from review as they are similar to previous changes (2)
- x/interchainstaking/keeper/hooks.go
- x/interchainstaking/keeper/zones.go
🧰 Additional context used
🪛 GitHub Check: devskim
app/upgrades/v1_7.go
[failure] 24-24: A token or key was found in source code. If this represents a secret, it should be moved somewhere else.
Do not store tokens or keys in source code.
🪛 GitHub Check: lint
x/interchainstaking/keeper/abci.go
[failure] 35-35:
commentFormatting: put a space between //
and comment text (gocritic)
🔇 Additional comments (16)
app/upgrades/upgrades.go (1)
27-27
: LGTM! The upgrade entry follows the established pattern.
The addition of the v1.7.0 upgrade handler maintains consistency with the existing upgrade structure.
Let's verify the upgrade handler implementation:
app/upgrades/types.go (2)
51-52
: LGTM! Version constant follows semantic versioning.
The new upgrade constant V010700UpgradeName
is correctly placed in the mainnet upgrades section and follows the established naming pattern.
51-52
: Verify the upgrade handler implementation.
Since this is a breaking change release that affects Celestia Inclusion proofs and deprecates the proof
field in favor of proof_any
, ensure that the upgrade handler properly manages the transition.
✅ Verification successful
Let me try a different search strategy to verify the upgrade handler implementation.
Let me try one more search strategy focusing on the upgrade handlers directory and any proof-related changes.
Let me check the v1.7.0 upgrade handler specifically since we can see it exists in the codebase.
No issues found with the v1.7.0 upgrade handler implementation
The verification shows that while there are proof-related changes in the codebase (particularly in the interchainquery module with proof_any
field), the v1.7.0 upgrade handler in app/upgrades/v1_7.go
is focused on different concerns:
- Handling withdrawal records for cosmoshub-4
- Managing coin minting and transfers
- Updating redemption rates
The proof-related changes appear to be part of the regular codebase structure rather than requiring specific migration handling in the upgrade.
🏁 Scripts executed
The following scripts were executed for the analysis:
Script:
#!/bin/bash
# Description: Verify the upgrade handler implementation for v1.7.0
# Check for the upgrade handler implementation
echo "Searching for v1.7.0 upgrade handler..."
rg -A 10 "V010700UpgradeHandler.*func.*"
# Check for proof field deprecation handling
echo "Checking proof field deprecation handling..."
ast-grep --pattern 'type GetTxWithProofResponse struct {
$$$
proof $$$
proof_any $$$
$$$
}'
Length of output: 332
Script:
#!/bin/bash
# Search for v1.7.0 upgrade handler with different patterns
echo "Searching for v1.7.0 upgrade handler implementation..."
rg -l "v1.7.0" app/upgrades/
# Check the upgrade handler file content if exists
echo "Checking upgrade handler content..."
fd -t f "v1_7_0" app/upgrades/ -x cat {}
# Look for any proof related changes in the codebase
echo "Checking for proof related changes..."
rg -A 5 "proof_any|GetTxWithProofResponse" app/
Length of output: 428
Script:
#!/bin/bash
# List all files in upgrades directory to understand the structure
echo "Listing files in upgrades directory..."
fd . app/upgrades/
# Check content of types.go for context
echo "Checking types.go content..."
cat app/upgrades/types.go
# Search for proof-related types and changes
echo "Searching for proof-related types..."
rg -A 10 "type.*Proof" app/
Length of output: 3425
Script:
#!/bin/bash
# Check the v1.7.0 upgrade handler implementation
echo "Checking v1.7.0 upgrade handler content..."
cat app/upgrades/v1_7.go
# Look for any proof-related changes in the entire codebase with broader context
echo "Searching for proof-related changes..."
rg -B 2 -A 5 "proof.*any|celestia.*proof|inclusion.*proof" .
Length of output: 12285
x/interchainstaking/keeper/abci.go (3)
48-49
: LGTM! Appropriate error handling in BeginBlocker.
The error handling for HandleMaturedWithdrawals
follows best practices by logging errors without panicking, which is crucial in a BeginBlocker context.
48-49
: Verify integration with Celestia Inclusion proofs.
Since this PR introduces changes for Celestia Inclusion proofs, please verify that the new HandleMaturedWithdrawals
implementation properly integrates with the updated proof handling system.
55-70
: Verify the usage of -1 as timeoutTimestamp parameter.
The balance query implementation looks correct, but please verify if using -1
as the timeoutTimestamp
parameter is intentional and properly handled by the ICQKeeper.
proto/quicksilver/interchainstaking/v1/interchainstaking.proto (1)
142-145
: LGTM! Well-structured timestamp field addition.
The addition of the completion_time
field to UnbondingRecord
is well-implemented with appropriate protobuf options:
- Uses standard timestamp type
- Marked as non-nullable
- Consistent with similar fields in other messages
x/interchainstaking/keeper/keeper_test.go (1)
353-353
: Method rename improves clarity and matches functionality.
The rename from GetUnbondingTokensAndCount
to GetWithdrawnTokensAndCount
better reflects the method's purpose, and the test cases comprehensively cover various withdrawal scenarios.
x/interchainstaking/keeper/ibc_packet_handlers.go (3)
509-514
: LGTM! Good parameter validation.
The added validation ensures the validator set interval parameter doesn't exceed int64 range before casting, preventing potential integer overflow issues.
1511-1515
: Previous review comment is still applicable.
The current timestamp calculation can lead to integer overflow. The previous suggestion to use time.Time.Add
remains valid.
Also applies to: 1524-1524
811-818
: LGTM! Proper handling of unbonding record completion time.
The code correctly updates and stores the completion time for unbonding records.
docs/swagger.yml (1)
4821-4823
: LGTM: Properly defined completion_time field
The addition of the completion_time field to unbonding records is well-structured, using appropriate OpenAPI type definitions and format specifications.
app/upgrades/v1_7.go (4)
18-18
: Verify that isMainnet
and isTest
functions are defined and accessible
Ensure that the functions isMainnet(ctx)
and isTest(ctx)
are properly defined and imported. If they are custom functions, confirm that they are implemented correctly to determine the network context.
32-32
: Verify the correctness of the minted and transferred coin amounts
Confirm that minting and transferring 50699994 uqatom
is intended and aligns with the upgrade requirements. Ensure this action adheres to the chain's economic policies and does not unintentionally affect the token supply.
Also applies to: 36-36
41-44
: Ensure overriding redemption rates without a cap is intended
The call to OverrideRedemptionRateNoCap
on all zones overrides the redemption rates without any cap, which might significantly impact the zones' economies. Verify that this operation is intentional and that potential effects have been considered.
33-35
: 🛠️ Refactor suggestion
Avoid using panic
for error handling in upgrade handlers
Using panic(err)
in the upgrade handler can cause the node to crash if an error occurs, potentially leading to a chain halt. It's advisable to handle errors gracefully and return them so they can be logged and managed appropriately.
Consider modifying the error handling as follows:
if err != nil {
- panic(err)
+ return fromVM, err
}
Apply this change to both error checks to ensure consistent and safe error handling.
Also applies to: 37-39
⛔ Skipped due to learnings
Learnt from: joe-bowman
PR: quicksilver-zone/quicksilver#1099
File: app/upgrades/upgrades.go:0-0
Timestamp: 2024-11-12T07:06:41.714Z
Learning: Using `panic` in the context of upgrade handlers is acceptable to ensure the upgrade process is halted in case of critical issues.
Learnt from: joe-bowman
PR: quicksilver-zone/quicksilver#1099
File: app/upgrades/upgrades.go:0-0
Timestamp: 2024-11-12T07:06:35.954Z
Learning: Using `panic` in the context of upgrade handlers is acceptable to ensure the upgrade process is halted in case of critical issues.
Learnt from: joe-bowman
PR: quicksilver-zone/quicksilver#1099
File: app/upgrades/upgrades.go:0-0
Timestamp: 2024-11-12T07:06:35.954Z
Learning: Using `panic` in the context of upgrade handlers is acceptable to ensure the upgrade process is halted in case of critical issues.
Learnt from: joe-bowman
PR: quicksilver-zone/quicksilver#1099
File: app/upgrades/upgrades.go:0-0
Timestamp: 2024-11-12T07:06:41.714Z
Learning: Using `panic` in the context of upgrade handlers is acceptable to ensure the upgrade process is halted in case of critical issues.
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
This reverts commit 4990cf4.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 0
🧹 Outside diff range and nitpick comments (1)
x/interchainstaking/keeper/keeper_test.go (1)
529-529
: Consider documenting the ignored return value. The
GetRatio
function now returns two values, but the second value is ignored. Consider adding a comment explaining what this ignored value represents for better code clarity.
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
⛔ Files ignored due to path filters (3)
go.sum
is excluded by!**/*.sum
go.work
is excluded by!**/*.work
go.work.sum
is excluded by!**/*.sum
📒 Files selected for processing (15)
.github/workflows/buildtest.yaml
(3 hunks).github/workflows/codeql.yml
(1 hunks).github/workflows/docker.yaml
(1 hunks).github/workflows/golangci.yml
(2 hunks).github/workflows/icq-relayer-build-test.yml
(2 hunks).github/workflows/simulation.yml
(1 hunks)Dockerfile
(2 hunks)go.mod
(9 hunks)icq-relayer/Dockerfile
(2 hunks)icq-relayer/go.mod
(6 hunks)icq-relayer/oldseed
(1 hunks)icq-relayer/pkg/types/config.go
(1 hunks)icq-relayer/seed
(1 hunks)proto/Dockerfile
(1 hunks)x/interchainstaking/keeper/keeper_test.go
(5 hunks)
✅ Files skipped from review due to trivial changes (2)
- icq-relayer/oldseed
- icq-relayer/seed
🚧 Files skipped from review as they are similar to previous changes (12)
- .github/workflows/buildtest.yaml
- .github/workflows/codeql.yml
- .github/workflows/docker.yaml
- .github/workflows/golangci.yml
- .github/workflows/icq-relayer-build-test.yml
- .github/workflows/simulation.yml
- Dockerfile
- go.mod
- icq-relayer/Dockerfile
- icq-relayer/go.mod
- icq-relayer/pkg/types/config.go
- proto/Dockerfile
🔇 Additional comments (3)
x/interchainstaking/keeper/keeper_test.go (3)
28-28
: LGTM: Import addition is correct
The addition of the minttypes import is properly formatted and necessary for the mint-related operations in the tests.
480-480
: LGTM: Precise decimal calculations
The ratio calculations use proper decimal arithmetic with sdk.NewDec
and Quo
operations, ensuring accurate results without floating-point precision issues.
Also applies to: 500-500
515-517
: LGTM: Proper sequence of mint and bank operations
The mint and bank operations are correctly implemented with proper error handling and sequencing:
- Minting coins first
- Followed by module-to-module transfer
Also applies to: 521-522
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actionable comments posted: 2
🧹 Outside diff range and nitpick comments (1)
utils/proofs/proofs.go (1)
42-49
: Optimize share slice allocationPre-allocate the shares slice to avoid unnecessary allocations during append operations.
- shares := []squareshare.Share{} + shares := make([]squareshare.Share, len(shareProof.Data)) for i, share := range shareProof.Data { sh, err := squareshare.NewShare(share) if err != nil { return nil, fmt.Errorf("unable to parse share %d: %w", i, err) } - shares = append(shares, *sh) + shares[i] = *sh }
📜 Review details
Configuration used: CodeRabbit UI
Review profile: CHILL
📒 Files selected for processing (1)
utils/proofs/proofs.go
(1 hunks)
🔇 Additional comments (4)
utils/proofs/proofs.go (4)
18-27
: LGTM! Clean interface design with compile-time type safety
The interface definition and type assertions are well-structured, providing clear contract requirements for implementations.
94-100
: LGTM! Correct interface registration
The implementation follows cosmos-sdk patterns for interface registration.
51-54
: 🛠️ Refactor suggestion
Add validation for empty transaction list
Add a check to ensure the parsed transactions list is not empty before proceeding.
txs, err := squareshare.ParseTxs(shares)
if err != nil {
return nil, fmt.Errorf("unable to parse txs from shareProof: %w", err)
}
+ if len(txs) == 0 {
+ return nil, fmt.Errorf("no transactions found in shares")
+ }
Likely invalid or redundant comment.
75-79
:
Add nil check for TxProof
Add a safety check to prevent nil pointer dereference.
func (p *TendermintProof) Validate(dataHash []byte, txHash string) ([]byte, error) {
+ if p.TxProof == nil {
+ return nil, fmt.Errorf("TxProof is nil")
+ }
tmproof, err := tmtypes.TxProofFromProto(*p.TxProof)
Likely invalid or redundant comment.
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
lgtm
// home, err := cmd.PersistentFlags().GetString(flags.FlagHome) | ||
// if err != nil { | ||
// return err | ||
// } | ||
// cfg = &types.Config{} | ||
// cfgPath := path.Join(home, "config.yaml") | ||
// _, err = os.Stat(cfgPath) | ||
// if err != nil { | ||
// if !os.IsNotExist(err) { // Return immediately | ||
// return err | ||
// } | ||
|
||
// if err := types.CreateConfig(home); err != nil { | ||
// return err | ||
// } | ||
// } | ||
|
||
// viper.SetConfigFile(cfgPath) | ||
// err = viper.ReadInConfig() | ||
// if err != nil { | ||
// fmt.Println("Failed to read in config:", err) | ||
// os.Exit(1) | ||
// } | ||
|
||
// read the config file bytes | ||
file, err := os.ReadFile(viper.ConfigFileUsed()) | ||
if err != nil { | ||
fmt.Println("Error reading file:", err) | ||
os.Exit(1) | ||
} | ||
|
||
// unmarshall them into the struct | ||
if err = yaml.Unmarshal(file, cfg); err != nil { | ||
fmt.Println("Error unmarshalling config:", err) | ||
os.Exit(1) | ||
} | ||
|
||
// instantiate chain client | ||
// TODO: this is a bit of a hack, we should probably have a | ||
// better way to inject modules into the client | ||
cfg.Cl = make(map[string]*client.ChainClient) | ||
for name, chain := range cfg.Chains { | ||
chain.Modules = append([]module.AppModuleBasic{}, ModuleBasics...) | ||
cl, err := client.NewChainClient(nil, chain, home, os.Stdin, os.Stdout) | ||
if err != nil { | ||
fmt.Println("Error creating chain client:", err) | ||
os.Exit(1) | ||
} | ||
cfg.Cl[name] = cl | ||
} | ||
|
||
// override chain if needed | ||
if cmd.PersistentFlags().Changed("chain") { | ||
defaultChain, err := cmd.PersistentFlags().GetString("chain") | ||
if err != nil { | ||
return err | ||
} | ||
|
||
cfg.DefaultChain = defaultChain | ||
} | ||
|
||
if cmd.PersistentFlags().Changed("output") { | ||
output, err := cmd.PersistentFlags().GetString("output") | ||
if err != nil { | ||
return err | ||
} | ||
|
||
// Should output be a global configuration item? | ||
for chain := range cfg.Chains { | ||
cfg.Chains[chain].OutputFormat = output | ||
} | ||
} | ||
// file, err := os.ReadFile(viper.ConfigFileUsed()) | ||
// if err != nil { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
reason behind commenting?
1. Summary
2. Type of change
3. Implementation details
The field `proof` is now deprecated in `x/interchainquery/types.GetTxWithProofResponse`, and replaced by `proof_any` of type `Any`. This should be populated with a proto-encoded message implementing the interface `utils/proofs.InclusionProof`. Current implementations of this include `TendermintProof`, which consists of a `github.com/tendermint/tendermint/types.TxProof` (as previously was passed directly in `GetTxWithProofResponse`), or `CelestiaProof`, which includes a Celestia ShareProof (imported from third-party-chains to avoid tendermint/comet conflicts) and a uint32 Index. The `InclusionProof` interface has a single method `Validate(dataHash []byte)` which takes the block header DataHash, and returns `(txBytes []byte, err error)`. The transaction data in `txBytes` is valid for the given proof and verified to have been included on the host chain.
BREAKING CHANGE
icq-relayer v1.1.0+ must be used with quicksilver v1.7.0 onwards. Previous versions of icq-relayer will not submit query responses in the correct form.
Summary by CodeRabbit
Summary by CodeRabbit
New Features
Introduced `RowProof` and `ShareProof` types for managing Merkle proofs.
Bug Fixes
Documentation
Chores
.gitignore
file to ignore specific build-related files.